ID (stringlengths 36-36) | Language (stringclasses, 1 value) | Repository Name (stringclasses, 13 values) | File Name (stringlengths 2-44) | File Path in Repository (stringlengths 11-111) | File Path for Unit Test (stringlengths 16-116) | Code (stringlengths 0-278k) | Unit Test - (Ground Truth) (stringlengths 127-663k) | Code Url (stringlengths 91-198) | Test Code Url (stringlengths 96-203) | Commit Hash (stringclasses, 13 values)
---|---|---|---|---|---|---|---|---|---|---
93ad1401-b7f7-499b-be17-eb2c1f1b45de | cpp | tensorflow/tensorflow | detection_postprocess | tensorflow/lite/kernels/detection_postprocess.cc | tensorflow/lite/kernels/detection_postprocess_test.cc | #include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <initializer_list>
#include <numeric>
#include <vector>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace detection_postprocess {
constexpr int kInputTensorBoxEncodings = 0;
constexpr int kInputTensorClassPredictions = 1;
constexpr int kInputTensorAnchors = 2;
constexpr int kOutputTensorDetectionBoxes = 0;
constexpr int kOutputTensorDetectionClasses = 1;
constexpr int kOutputTensorDetectionScores = 2;
constexpr int kOutputTensorNumDetections = 3;
constexpr int kNumCoordBox = 4;
constexpr int kBatchSize = 1;
constexpr int kNumDetectionsPerClass = 100;
struct BoxCornerEncoding {
float ymin;
float xmin;
float ymax;
float xmax;
};
struct CenterSizeEncoding {
float y;
float x;
float h;
float w;
};
static_assert(sizeof(BoxCornerEncoding) == sizeof(float) * kNumCoordBox,
"Size of BoxCornerEncoding is 4 float values");
static_assert(sizeof(CenterSizeEncoding) == sizeof(float) * kNumCoordBox,
"Size of CenterSizeEncoding is 4 float values");
struct OpData {
int max_detections;
int max_classes_per_detection;
int detections_per_class;
float non_max_suppression_score_threshold;
float intersection_over_union_threshold;
int num_classes;
bool use_regular_non_max_suppression;
CenterSizeEncoding scale_values;
int decoded_boxes_index;
int scores_index;
};
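// Parses the flexbuffer-encoded custom options and registers two temporary
// tensors (decoded boxes and dequantized scores) for later use.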
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData;
const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap();
op_data->max_detections = m["max_detections"].AsInt32();
op_data->max_classes_per_detection = m["max_classes_per_detection"].AsInt32();
if (m["detections_per_class"].IsNull())
op_data->detections_per_class = kNumDetectionsPerClass;
else
op_data->detections_per_class = m["detections_per_class"].AsInt32();
if (m["use_regular_nms"].IsNull())
op_data->use_regular_non_max_suppression = false;
else
op_data->use_regular_non_max_suppression = m["use_regular_nms"].AsBool();
op_data->non_max_suppression_score_threshold =
m["nms_score_threshold"].AsFloat();
op_data->intersection_over_union_threshold = m["nms_iou_threshold"].AsFloat();
op_data->num_classes = m["num_classes"].AsInt32();
op_data->scale_values.y = m["y_scale"].AsFloat();
op_data->scale_values.x = m["x_scale"].AsFloat();
op_data->scale_values.h = m["h_scale"].AsFloat();
op_data->scale_values.w = m["w_scale"].AsFloat();
context->AddTensors(context, 1, &op_data->decoded_boxes_index);
context->AddTensors(context, 1, &op_data->scores_index);
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete static_cast<OpData*>(buffer);
}
TfLiteStatus SetTensorSizes(TfLiteContext* context, TfLiteTensor* tensor,
std::initializer_list<int> values) {
TfLiteIntArray* size = TfLiteIntArrayCreate(values.size());
int index = 0;
for (const auto& v : values) {
size->data[index] = v;
++index;
}
return context->ResizeTensor(context, tensor, size);
}
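// Validates the three inputs and four outputs, resizes the float32 output
// tensors, and sets up the decoded-boxes and scores temporaries.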
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* op_data = static_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
const TfLiteTensor* input_box_encodings;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorBoxEncodings,
&input_box_encodings));
const TfLiteTensor* input_class_predictions;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorClassPredictions,
&input_class_predictions));
const TfLiteTensor* input_anchors;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensorAnchors,
&input_anchors));
TF_LITE_ENSURE_EQ(context, NumDimensions(input_box_encodings), 3);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_class_predictions), 3);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_anchors), 2);
const int num_detected_boxes =
op_data->max_detections * op_data->max_classes_per_detection;
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 4);
TfLiteTensor* detection_boxes;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorDetectionBoxes,
&detection_boxes));
detection_boxes->type = kTfLiteFloat32;
SetTensorSizes(context, detection_boxes,
{kBatchSize, num_detected_boxes, kNumCoordBox});
TfLiteTensor* detection_classes;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorDetectionClasses,
&detection_classes));
detection_classes->type = kTfLiteFloat32;
SetTensorSizes(context, detection_classes, {kBatchSize, num_detected_boxes});
TfLiteTensor* detection_scores;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorDetectionScores,
&detection_scores));
detection_scores->type = kTfLiteFloat32;
SetTensorSizes(context, detection_scores, {kBatchSize, num_detected_boxes});
TfLiteTensor* num_detections;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorNumDetections,
&num_detections));
num_detections->type = kTfLiteFloat32;
SetTensorSizes(context, num_detections, {1});
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(2);
node->temporaries->data[0] = op_data->decoded_boxes_index;
node->temporaries->data[1] = op_data->scores_index;
TfLiteTensor* decoded_boxes = &context->tensors[op_data->decoded_boxes_index];
decoded_boxes->type = kTfLiteFloat32;
decoded_boxes->allocation_type = kTfLiteArenaRw;
SetTensorSizes(context, decoded_boxes,
{input_box_encodings->dims->data[1], kNumCoordBox});
TfLiteTensor* scores = &context->tensors[op_data->scores_index];
scores->type = kTfLiteFloat32;
scores->allocation_type = kTfLiteArenaRw;
SetTensorSizes(context, scores,
{input_class_predictions->dims->data[1],
input_class_predictions->dims->data[2]});
return kTfLiteOk;
}
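// Converts a quantized uint8 value to float as (x - zero_point) * scale.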
class Dequantizer {
public:
Dequantizer(int zero_point, float scale)
: zero_point_(zero_point), scale_(scale) {}
float operator()(uint8 x) {
return (static_cast<float>(x) - zero_point_) * scale_;
}
private:
int zero_point_;
float scale_;
};
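// Dequantizes the idx-th uint8 box encoding (or anchor) into center-size form.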
void DequantizeBoxEncodings(const TfLiteTensor* input_box_encodings, int idx,
float quant_zero_point, float quant_scale,
int length_box_encoding,
CenterSizeEncoding* box_centersize) {
const uint8* boxes =
GetTensorData<uint8>(input_box_encodings) + length_box_encoding * idx;
Dequantizer dequantize(quant_zero_point, quant_scale);
box_centersize->y = dequantize(boxes[0]);
box_centersize->x = dequantize(boxes[1]);
box_centersize->h = dequantize(boxes[2]);
box_centersize->w = dequantize(boxes[3]);
}
template <class T>
T ReInterpretTensor(const TfLiteTensor* tensor) {
const float* tensor_base = GetTensorData<float>(tensor);
return reinterpret_cast<T>(tensor_base);
}
template <class T>
T ReInterpretTensor(TfLiteTensor* tensor) {
float* tensor_base = GetTensorData<float>(tensor);
return reinterpret_cast<T>(tensor_base);
}
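// Decodes center-size box encodings, scaled and offset by their anchors, into
// corner-form (ymin, xmin, ymax, xmax) boxes in the decoded_boxes temporary.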
TfLiteStatus DecodeCenterSizeBoxes(TfLiteContext* context, TfLiteNode* node,
OpData* op_data) {
const TfLiteTensor* input_box_encodings;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorBoxEncodings,
&input_box_encodings));
TF_LITE_ENSURE_EQ(context, input_box_encodings->dims->data[0], kBatchSize);
const int num_boxes = input_box_encodings->dims->data[1];
TF_LITE_ENSURE(context, input_box_encodings->dims->data[2] >= kNumCoordBox);
const TfLiteTensor* input_anchors;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensorAnchors,
&input_anchors));
CenterSizeEncoding box_centersize;
CenterSizeEncoding scale_values = op_data->scale_values;
CenterSizeEncoding anchor;
for (int idx = 0; idx < num_boxes; ++idx) {
switch (input_box_encodings->type) {
case kTfLiteUInt8:
DequantizeBoxEncodings(
input_box_encodings, idx,
static_cast<float>(input_box_encodings->params.zero_point),
static_cast<float>(input_box_encodings->params.scale),
input_box_encodings->dims->data[2], &box_centersize);
DequantizeBoxEncodings(
input_anchors, idx,
static_cast<float>(input_anchors->params.zero_point),
static_cast<float>(input_anchors->params.scale), kNumCoordBox,
&anchor);
break;
case kTfLiteFloat32: {
const int box_encoding_idx = idx * input_box_encodings->dims->data[2];
const float* boxes =
&(GetTensorData<float>(input_box_encodings)[box_encoding_idx]);
box_centersize = *reinterpret_cast<const CenterSizeEncoding*>(boxes);
TF_LITE_ENSURE_EQ(context, input_anchors->type, kTfLiteFloat32);
anchor =
ReInterpretTensor<const CenterSizeEncoding*>(input_anchors)[idx];
break;
}
default:
return kTfLiteError;
}
float ycenter = static_cast<float>(static_cast<double>(box_centersize.y) /
static_cast<double>(scale_values.y) *
static_cast<double>(anchor.h) +
static_cast<double>(anchor.y));
float xcenter = static_cast<float>(static_cast<double>(box_centersize.x) /
static_cast<double>(scale_values.x) *
static_cast<double>(anchor.w) +
static_cast<double>(anchor.x));
float half_h =
static_cast<float>(0.5 *
(std::exp(static_cast<double>(box_centersize.h) /
static_cast<double>(scale_values.h))) *
static_cast<double>(anchor.h));
float half_w =
static_cast<float>(0.5 *
(std::exp(static_cast<double>(box_centersize.w) /
static_cast<double>(scale_values.w))) *
static_cast<double>(anchor.w));
TfLiteTensor* decoded_boxes =
&context->tensors[op_data->decoded_boxes_index];
TF_LITE_ENSURE_EQ(context, decoded_boxes->type, kTfLiteFloat32);
auto& box = ReInterpretTensor<BoxCornerEncoding*>(decoded_boxes)[idx];
box.ymin = ycenter - half_h;
box.xmin = xcenter - half_w;
box.ymax = ycenter + half_h;
box.xmax = xcenter + half_w;
}
return kTfLiteOk;
}
void DecreasingPartialArgSort(const float* values, int num_values,
int num_to_sort, int* indices) {
if (num_to_sort == 1) {
indices[0] = optimized_ops::ArgMaxVector(values, num_values);
} else {
std::iota(indices, indices + num_values, 0);
std::partial_sort(
indices, indices + num_to_sort, indices + num_values,
[&values](const int i, const int j) { return values[i] > values[j]; });
}
}
void DecreasingArgSort(const float* values, int num_values, int* indices) {
std::iota(indices, indices + num_values, 0);
std::stable_sort(
indices, indices + num_values,
[&values](const int i, const int j) { return values[i] > values[j]; });
}
void SelectDetectionsAboveScoreThreshold(const std::vector<float>& values,
const float threshold,
std::vector<float>* keep_values,
std::vector<int>* keep_indices) {
for (int i = 0; i < values.size(); i++) {
if (values[i] >= threshold) {
keep_values->emplace_back(values[i]);
keep_indices->emplace_back(i);
}
}
}
bool ValidateBoxes(const TfLiteTensor* decoded_boxes, const int num_boxes) {
for (int i = 0; i < num_boxes; ++i) {
auto& box = ReInterpretTensor<const BoxCornerEncoding*>(decoded_boxes)[i];
if (box.ymin > box.ymax || box.xmin > box.xmax) {
return false;
}
}
return true;
}
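// Intersection-over-union of two corner-form boxes; returns 0 if either box
// has non-positive area.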
float ComputeIntersectionOverUnion(const TfLiteTensor* decoded_boxes,
const int i, const int j) {
auto& box_i = ReInterpretTensor<const BoxCornerEncoding*>(decoded_boxes)[i];
auto& box_j = ReInterpretTensor<const BoxCornerEncoding*>(decoded_boxes)[j];
const float area_i = (box_i.ymax - box_i.ymin) * (box_i.xmax - box_i.xmin);
const float area_j = (box_j.ymax - box_j.ymin) * (box_j.xmax - box_j.xmin);
if (area_i <= 0 || area_j <= 0) return 0.0;
const float intersection_ymin = std::max<float>(box_i.ymin, box_j.ymin);
const float intersection_xmin = std::max<float>(box_i.xmin, box_j.xmin);
const float intersection_ymax = std::min<float>(box_i.ymax, box_j.ymax);
const float intersection_xmax = std::min<float>(box_i.xmax, box_j.xmax);
const float intersection_area =
std::max<float>(intersection_ymax - intersection_ymin, 0.0) *
std::max<float>(intersection_xmax - intersection_xmin, 0.0);
return intersection_area / (area_i + area_j - intersection_area);
}
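// Greedy single-class NMS: keeps boxes at or above the score threshold in
// decreasing score order, suppressing any box whose IoU with an already
// selected box exceeds the threshold.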
TfLiteStatus NonMaxSuppressionSingleClassHelper(
TfLiteContext* context, TfLiteNode* node, OpData* op_data,
const std::vector<float>& scores, int max_detections,
std::vector<int>* selected) {
const TfLiteTensor* input_box_encodings;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorBoxEncodings,
&input_box_encodings));
const TfLiteTensor* decoded_boxes =
&context->tensors[op_data->decoded_boxes_index];
const int num_boxes = input_box_encodings->dims->data[1];
const float non_max_suppression_score_threshold =
op_data->non_max_suppression_score_threshold;
const float intersection_over_union_threshold =
op_data->intersection_over_union_threshold;
TF_LITE_ENSURE(context, (max_detections >= 0));
TF_LITE_ENSURE(context, (intersection_over_union_threshold > 0.0f) &&
(intersection_over_union_threshold <= 1.0f));
TF_LITE_ENSURE_EQ(context, decoded_boxes->type, kTfLiteFloat32);
TF_LITE_ENSURE(context, ValidateBoxes(decoded_boxes, num_boxes));
std::vector<int> keep_indices;
std::vector<float> keep_scores;
SelectDetectionsAboveScoreThreshold(
scores, non_max_suppression_score_threshold, &keep_scores, &keep_indices);
int num_scores_kept = keep_scores.size();
std::vector<int> sorted_indices;
sorted_indices.resize(num_scores_kept);
DecreasingArgSort(keep_scores.data(), num_scores_kept, sorted_indices.data());
const int num_boxes_kept = num_scores_kept;
const int output_size = std::min(num_boxes_kept, max_detections);
selected->clear();
int num_active_candidate = num_boxes_kept;
std::vector<uint8_t> active_box_candidate(num_boxes_kept, 1);
for (int i = 0; i < num_boxes_kept; ++i) {
if (num_active_candidate == 0 || selected->size() >= output_size) break;
if (active_box_candidate[i] == 1) {
selected->push_back(keep_indices[sorted_indices[i]]);
active_box_candidate[i] = 0;
num_active_candidate--;
} else {
continue;
}
for (int j = i + 1; j < num_boxes_kept; ++j) {
if (active_box_candidate[j] == 1) {
TF_LITE_ENSURE_EQ(context, decoded_boxes->type, kTfLiteFloat32);
float intersection_over_union = ComputeIntersectionOverUnion(
decoded_boxes, keep_indices[sorted_indices[i]],
keep_indices[sorted_indices[j]]);
if (intersection_over_union > intersection_over_union_threshold) {
active_box_candidate[j] = 0;
num_active_candidate--;
}
}
}
}
return kTfLiteOk;
}
struct BoxInfo {
int index;
float score;
};
struct NMSTaskParam {
TfLiteContext* context;
TfLiteNode* node;
OpData* op_data;
const float* scores;
int num_classes;
int num_boxes;
int label_offset;
int num_classes_with_background;
int num_detections_per_class;
int max_detections;
std::vector<int>& num_selected;
};
void InplaceMergeBoxInfo(std::vector<BoxInfo>& boxes, int mid_index,
int end_index) {
std::inplace_merge(
boxes.begin(), boxes.begin() + mid_index, boxes.begin() + end_index,
[](const BoxInfo& a, const BoxInfo& b) { return a.score >= b.score; });
}
TfLiteStatus ComputeNMSResult(const NMSTaskParam& nms_task_param, int col_begin,
int col_end, int& sorted_indices_size,
std::vector<BoxInfo>& resulted_sorted_box_info) {
std::vector<float> class_scores(nms_task_param.num_boxes);
std::vector<int> selected;
selected.reserve(nms_task_param.num_detections_per_class);
for (int col = col_begin; col <= col_end; ++col) {
const float* scores_base =
nms_task_param.scores + col + nms_task_param.label_offset;
for (int row = 0; row < nms_task_param.num_boxes; row++) {
class_scores[row] = *scores_base;
scores_base += nms_task_param.num_classes_with_background;
}
selected.clear();
TF_LITE_ENSURE_OK(
nms_task_param.context,
NonMaxSuppressionSingleClassHelper(
nms_task_param.context, nms_task_param.node, nms_task_param.op_data,
class_scores, nms_task_param.num_detections_per_class, &selected));
if (selected.empty()) {
continue;
}
for (int i = 0; i < selected.size(); ++i) {
resulted_sorted_box_info[sorted_indices_size + i].score =
class_scores[selected[i]];
resulted_sorted_box_info[sorted_indices_size + i].index =
(selected[i] * nms_task_param.num_classes_with_background + col +
nms_task_param.label_offset);
}
InplaceMergeBoxInfo(resulted_sorted_box_info, sorted_indices_size,
sorted_indices_size + selected.size());
sorted_indices_size =
std::min(sorted_indices_size + static_cast<int>(selected.size()),
nms_task_param.max_detections);
}
return kTfLiteOk;
}
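// Threadpool task that runs per-class NMS on class columns handed out via a
// shared atomic counter, keeping its partial results sorted by score.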
struct NonMaxSuppressionWorkerTask : cpu_backend_threadpool::Task {
NonMaxSuppressionWorkerTask(NMSTaskParam& nms_task_param,
std::atomic<int>& next_col, int col_begin)
: nms_task_param(nms_task_param),
next_col(next_col),
col_begin(col_begin),
sorted_indices_size(0) {}
void Run() override {
sorted_box_info.resize(nms_task_param.num_detections_per_class +
nms_task_param.max_detections);
for (int col = col_begin; col < nms_task_param.num_classes;
col = (++next_col)) {
if (ComputeNMSResult(nms_task_param, col, col, sorted_indices_size,
sorted_box_info) != kTfLiteOk) {
break;
}
}
}
NMSTaskParam& nms_task_param;
std::atomic<int>& next_col;
const int col_begin;
int sorted_indices_size;
std::vector<BoxInfo> sorted_box_info;
};
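// Regular (per-class) multi-class NMS, optionally multi-threaded; merges the
// per-class results by score and writes the top max_detections boxes, classes,
// and scores to the output tensors.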
TfLiteStatus NonMaxSuppressionMultiClassRegularHelper(TfLiteContext* context,
TfLiteNode* node,
OpData* op_data,
const float* scores) {
const TfLiteTensor* input_box_encodings;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorBoxEncodings,
&input_box_encodings));
const TfLiteTensor* input_class_predictions;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorClassPredictions,
&input_class_predictions));
const TfLiteTensor* decoded_boxes =
&context->tensors[op_data->decoded_boxes_index];
TfLiteTensor* detection_boxes;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorDetectionBoxes,
&detection_boxes));
TfLiteTensor* detection_classes;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorDetectionClasses,
&detection_classes));
TfLiteTensor* detection_scores;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorDetectionScores,
&detection_scores));
TfLiteTensor* num_detections;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorNumDetections,
&num_detections));
const int num_boxes = input_box_encodings->dims->data[1];
const int num_classes = op_data->num_classes;
const int num_detections_per_class =
std::min(op_data->detections_per_class, op_data->max_detections);
const int max_detections = op_data->max_detections;
const int num_classes_with_background =
input_class_predictions->dims->data[2];
int label_offset = num_classes_with_background - num_classes;
TF_LITE_ENSURE(context, num_detections_per_class > 0);
int sorted_indices_size = 0;
std::vector<BoxInfo> box_info_after_regular_non_max_suppression(
max_detections + num_detections_per_class);
std::vector<int> num_selected(num_classes);
NMSTaskParam nms_task_param{context,
node,
op_data,
scores,
num_classes,
num_boxes,
label_offset,
num_classes_with_background,
num_detections_per_class,
max_detections,
num_selected};
int num_threads =
CpuBackendContext::GetFromContext(context)->max_num_threads();
if (num_threads == 1) {
TF_LITE_ENSURE_OK(
context, ComputeNMSResult(nms_task_param, 0,
num_classes - 1, sorted_indices_size,
box_info_after_regular_non_max_suppression));
} else {
std::atomic<int> next_col(num_threads);
std::vector<NonMaxSuppressionWorkerTask> tasks;
tasks.reserve(num_threads);
for (int i = 0; i < num_threads; ++i) {
tasks.emplace_back(
NonMaxSuppressionWorkerTask(nms_task_param, next_col, i));
}
cpu_backend_threadpool::Execute(tasks.size(), tasks.data(),
CpuBackendContext::GetFromContext(context));
for (int j = 0; j < tasks.size(); ++j) {
if (tasks[j].sorted_indices_size == 0) {
continue;
}
memcpy(&box_info_after_regular_non_max_suppression[sorted_indices_size],
&tasks[j].sorted_box_info[0],
sizeof(BoxInfo) * tasks[j].sorted_indices_size);
InplaceMergeBoxInfo(box_info_after_regular_non_max_suppression,
sorted_indices_size,
sorted_indices_size + tasks[j].sorted_indices_size);
sorted_indices_size = std::min(
sorted_indices_size + tasks[j].sorted_indices_size, max_detections);
}
}
for (int output_box_index = 0; output_box_index < max_detections;
output_box_index++) {
if (output_box_index < sorted_indices_size) {
const int anchor_index = floor(
box_info_after_regular_non_max_suppression[output_box_index].index /
num_classes_with_background);
const int class_index =
box_info_after_regular_non_max_suppression[output_box_index].index -
anchor_index * num_classes_with_background - label_offset;
const float selected_score =
box_info_after_regular_non_max_suppression[output_box_index].score;
TF_LITE_ENSURE_EQ(context, detection_boxes->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, decoded_boxes->type, kTfLiteFloat32);
ReInterpretTensor<BoxCornerEncoding*>(detection_boxes)[output_box_index] =
ReInterpretTensor<const BoxCornerEncoding*>(
decoded_boxes)[anchor_index];
GetTensorData<float>(detection_classes)[output_box_index] = class_index;
GetTensorData<float>(detection_scores)[output_box_index] = selected_score;
} else {
TF_LITE_ENSURE_EQ(context, detection_boxes->type, kTfLiteFloat32);
ReInterpretTensor<BoxCornerEncoding*>(
detection_boxes)[output_box_index] = {0.0f, 0.0f, 0.0f, 0.0f};
GetTensorData<float>(detection_classes)[output_box_index] = 0.0f;
GetTensorData<float>(detection_scores)[output_box_index] = 0.0f;
}
}
GetTensorData<float>(num_detections)[0] = sorted_indices_size;
box_info_after_regular_non_max_suppression.clear();
return kTfLiteOk;
}
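// Fast multi-class NMS: runs a single NMS pass over each anchor's best class
// score, then emits the top classes for every selected anchor.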
TfLiteStatus NonMaxSuppressionMultiClassFastHelper(TfLiteContext* context,
TfLiteNode* node,
OpData* op_data,
const float* scores) {
const TfLiteTensor* input_box_encodings;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorBoxEncodings,
&input_box_encodings));
const TfLiteTensor* input_class_predictions;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorClassPredictions,
&input_class_predictions));
const TfLiteTensor* decoded_boxes =
&context->tensors[op_data->decoded_boxes_index];
TfLiteTensor* detection_boxes;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorDetectionBoxes,
&detection_boxes));
TfLiteTensor* detection_classes;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorDetectionClasses,
&detection_classes));
TfLiteTensor* detection_scores;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorDetectionScores,
&detection_scores));
TfLiteTensor* num_detections;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorNumDetections,
&num_detections));
const int num_boxes = input_box_encodings->dims->data[1];
const int num_classes = op_data->num_classes;
const int max_categories_per_anchor = op_data->max_classes_per_detection;
const int num_classes_with_background =
input_class_predictions->dims->data[2];
int label_offset = num_classes_with_background - num_classes;
TF_LITE_ENSURE(context, (max_categories_per_anchor > 0));
const int num_categories_per_anchor =
std::min(max_categories_per_anchor, num_classes);
std::vector<float> max_scores;
max_scores.resize(num_boxes);
std::vector<int> sorted_class_indices;
sorted_class_indices.resize(num_boxes * num_classes);
for (int row = 0; row < num_boxes; row++) {
const float* box_scores =
scores + row * num_classes_with_background + label_offset;
int* class_indices = sorted_class_indices.data() + row * num_classes;
DecreasingPartialArgSort(box_scores, num_classes, num_categories_per_anchor,
class_indices);
max_scores[row] = box_scores[class_indices[0]];
}
std::vector<int> selected;
TF_LITE_ENSURE_STATUS(NonMaxSuppressionSingleClassHelper(
context, node, op_data, max_scores, op_data->max_detections, &selected));
int output_box_index = 0;
for (const auto& selected_index : selected) {
const float* box_scores =
scores + selected_index * num_classes_with_background + label_offset;
const int* class_indices =
sorted_class_indices.data() + selected_index * num_classes;
for (int col = 0; col < num_categories_per_anchor; ++col) {
int box_offset = max_categories_per_anchor * output_box_index + col;
TF_LITE_ENSURE_EQ(context, detection_boxes->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, decoded_boxes->type, kTfLiteFloat32);
ReInterpretTensor<BoxCornerEncoding*>(detection_boxes)[box_offset] =
ReInterpretTensor<const BoxCornerEncoding*>(
decoded_boxes)[selected_index];
GetTensorData<float>(detection_classes)[box_offset] = class_indices[col];
GetTensorData<float>(detection_scores)[box_offset] =
box_scores[class_indices[col]];
}
output_box_index++;
}
GetTensorData<float>(num_detections)[0] = output_box_index;
return kTfLiteOk;
}
void DequantizeClassPredictions(const TfLiteTensor* input_class_predictions,
const int num_boxes,
const int num_classes_with_background,
TfLiteTensor* scores) {
float quant_zero_point =
static_cast<float>(input_class_predictions->params.zero_point);
float quant_scale = static_cast<float>(input_class_predictions->params.scale);
tflite::DequantizationParams op_params;
op_params.zero_point = quant_zero_point;
op_params.scale = quant_scale;
const auto shape = RuntimeShape(1, num_boxes * num_classes_with_background);
optimized_ops::Dequantize(op_params, shape,
GetTensorData<uint8>(input_class_predictions),
shape, GetTensorData<float>(scores));
}
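// Dequantizes class predictions when needed and dispatches to the regular or
// fast NMS implementation.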
TfLiteStatus NonMaxSuppressionMultiClass(TfLiteContext* context,
TfLiteNode* node, OpData* op_data) {
const TfLiteTensor* input_box_encodings;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorBoxEncodings,
&input_box_encodings));
const TfLiteTensor* input_class_predictions;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorClassPredictions,
&input_class_predictions));
const int num_boxes = input_box_encodings->dims->data[1];
const int num_classes = op_data->num_classes;
TF_LITE_ENSURE_EQ(context, input_class_predictions->dims->data[0],
kBatchSize);
TF_LITE_ENSURE_EQ(context, input_class_predictions->dims->data[1], num_boxes);
const int num_classes_with_background =
input_class_predictions->dims->data[2];
TF_LITE_ENSURE(context, (num_classes_with_background - num_classes <= 1));
TF_LITE_ENSURE(context, (num_classes_with_background >= num_classes));
const TfLiteTensor* scores;
switch (input_class_predictions->type) {
case kTfLiteUInt8: {
TfLiteTensor* temporary_scores = &context->tensors[op_data->scores_index];
DequantizeClassPredictions(input_class_predictions, num_boxes,
num_classes_with_background, temporary_scores);
scores = temporary_scores;
} break;
case kTfLiteFloat32:
scores = input_class_predictions;
break;
default:
return kTfLiteError;
}
if (op_data->use_regular_non_max_suppression)
TF_LITE_ENSURE_STATUS(NonMaxSuppressionMultiClassRegularHelper(
context, node, op_data, GetTensorData<float>(scores)));
else
TF_LITE_ENSURE_STATUS(NonMaxSuppressionMultiClassFastHelper(
context, node, op_data, GetTensorData<float>(scores)));
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, (kBatchSize == 1));
auto* op_data = static_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_STATUS(DecodeCenterSizeBoxes(context, node, op_data));
TF_LITE_ENSURE_STATUS(NonMaxSuppressionMultiClass(context, node, op_data));
return kTfLiteOk;
}
}
TfLiteRegistration* Register_DETECTION_POSTPROCESS() {
static TfLiteRegistration r = {
detection_postprocess::Init, detection_postprocess::Free,
detection_postprocess::Prepare, detection_postprocess::Eval};
return &r;
}
TfLiteRegistration* Register_TFLITE_DETECTION_POST_PROCESS() {
return Register_DETECTION_POSTPROCESS();
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace custom {
TfLiteRegistration* Register_DETECTION_POSTPROCESS();
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
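// Test helper that wraps the custom op in a single-op model with fixed options
// (3 max detections, 2 classes, IoU threshold 0.5).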
class BaseDetectionPostprocessOpModel : public SingleOpModel {
public:
BaseDetectionPostprocessOpModel(
const TensorData& input1, const TensorData& input2,
const TensorData& input3, const TensorData& output1,
const TensorData& output2, const TensorData& output3,
const TensorData& output4, int max_classes_per_detection = 1) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
input3_ = AddInput(input3);
output1_ = AddOutput(output1);
output2_ = AddOutput(output2);
output3_ = AddOutput(output3);
output4_ = AddOutput(output4);
flexbuffers::Builder fbb;
fbb.Map([&]() {
fbb.Int("max_detections", 3);
fbb.Int("max_classes_per_detection", max_classes_per_detection);
fbb.Float("nms_score_threshold", 0.0);
fbb.Float("nms_iou_threshold", 0.5);
fbb.Int("num_classes", 2);
fbb.Float("y_scale", 10.0);
fbb.Float("x_scale", 10.0);
fbb.Float("h_scale", 5.0);
fbb.Float("w_scale", 5.0);
});
fbb.Finish();
SetCustomOp("TFLite_Detection_PostProcess", fbb.GetBuffer(),
Register_DETECTION_POSTPROCESS);
BuildInterpreter({GetShape(input1_), GetShape(input2_), GetShape(input3_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
int input3() { return input3_; }
template <class T>
void SetInput1(std::initializer_list<T> data) {
PopulateTensor<T>(input1_, data);
}
template <class T>
void SetInput2(std::initializer_list<T> data) {
PopulateTensor<T>(input2_, data);
}
template <class T>
void SetInput3(std::initializer_list<T> data) {
PopulateTensor<T>(input3_, data);
}
template <class T>
std::vector<T> GetOutput1() {
return ExtractVector<T>(output1_);
}
template <class T>
std::vector<T> GetOutput2() {
return ExtractVector<T>(output2_);
}
template <class T>
std::vector<T> GetOutput3() {
return ExtractVector<T>(output3_);
}
template <class T>
std::vector<T> GetOutput4() {
return ExtractVector<T>(output4_);
}
std::vector<int> GetOutputShape1() { return GetTensorShape(output1_); }
std::vector<int> GetOutputShape2() { return GetTensorShape(output2_); }
std::vector<int> GetOutputShape3() { return GetTensorShape(output3_); }
std::vector<int> GetOutputShape4() { return GetTensorShape(output4_); }
protected:
int input1_;
int input2_;
int input3_;
int output1_;
int output2_;
int output3_;
int output4_;
};
TEST(DetectionPostprocessOpTest, FloatTest) {
BaseDetectionPostprocessOpModel m(
{TensorType_FLOAT32, {1, 6, 4}}, {TensorType_FLOAT32, {1, 6, 3}},
{TensorType_FLOAT32, {6, 4}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}});
m.SetInput1<float>({
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0
});
m.SetInput2<float>({0., .9, .8, 0., .75, .72, 0., .6, .5, 0., .93, .95, 0.,
.5, .4, 0., .3, .2});
m.SetInput3<float>({
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 100.5, 1.0, 1.0
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 1.0, 1.0, 0.0, 100.0, 1.0, 101.0},
1e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0}, 1e-4)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, 0.9, 0.3}, 1e-4)));
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({3.0}, 1e-4)));
}
TEST(DetectionPostprocessOpTest, FloatTestWithDegeneratedBox) {
BaseDetectionPostprocessOpModel m(
{TensorType_FLOAT32, {1, 2, 4}}, {TensorType_FLOAT32, {1, 2, 3}},
{TensorType_FLOAT32, {2, 4}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}});
m.SetInput1<float>({
0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
});
m.SetInput2<float>({
0., .9, .8,
0., .2, .7
});
m.SetInput3<float>({
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 0.0, 0.0
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
const int num_detections = static_cast<int>(m.GetOutput4<float>()[0]);
EXPECT_EQ(num_detections, 2);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
std::vector<float> detection_boxes = m.GetOutput1<float>();
detection_boxes.resize(num_detections * 4);
EXPECT_THAT(detection_boxes,
ElementsAreArray(ArrayFloatNear({0.0, 0.0, 1.0, 1.0,
0.5, 0.5, 0.5, 0.5},
1e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
std::vector<float> detection_classes = m.GetOutput2<float>();
detection_classes.resize(num_detections);
EXPECT_THAT(detection_classes,
ElementsAreArray(ArrayFloatNear({0, 1}, 1e-4)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
std::vector<float> detection_scores = m.GetOutput3<float>();
detection_scores.resize(num_detections);
EXPECT_THAT(detection_scores,
ElementsAreArray(ArrayFloatNear({0.9, 0.7}, 1e-4)));
}
TEST(DetectionPostprocessOpTest, QuantizedTest) {
BaseDetectionPostprocessOpModel m(
{TensorType_UINT8, {1, 6, 4}, -1.0, 1.0},
{TensorType_UINT8, {1, 6, 3}, 0.0, 1.0},
{TensorType_UINT8, {6, 4}, 0.0, 100.5}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}});
std::vector<std::vector<float>> inputs1 = {{
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0
}};
m.QuantizeAndPopulate<uint8_t>(m.input1(), inputs1[0]);
std::vector<std::vector<float>> inputs2 = {{0., .9, .8, 0., .75, .72, 0., .6,
.5, 0., .93, .95, 0., .5, .4, 0.,
.3, .2}};
m.QuantizeAndPopulate<uint8_t>(m.input2(), inputs2[0]);
std::vector<std::vector<float>> inputs3 = {{
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 100.5, 1.0, 1.0
}};
m.QuantizeAndPopulate<uint8_t>(m.input3(), inputs3[0]);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 1.0, 1.0, 0.0, 100.0, 1.0, 101.0},
3e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0}, 1e-1)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, 0.9, 0.3}, 1e-1)));
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({3.0}, 1e-1)));
}
TEST(DetectionPostprocessOpTest, MaxClass2Test) {
BaseDetectionPostprocessOpModel m(
{TensorType_FLOAT32, {1, 6, 4}}, {TensorType_FLOAT32, {1, 6, 3}},
{TensorType_FLOAT32, {6, 4}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, 2);
m.SetInput1<float>({
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0
});
m.SetInput2<float>({0., .9, .8, 0., .75, .72, 0., .6, .5, 0., .93, .95, 0.,
.5, .4, 0., .3, .2});
m.SetInput3<float>({
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 100.5, 1.0, 1.0
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 6, 4));
EXPECT_THAT(m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 10.0, 1.0, 11.0,
0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0,
0.0, 100.0, 1.0, 101.0, 0.0, 100.0, 1.0, 101.0},
1e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 6));
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0, 1, 0, 1}, 1e-4)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 6));
EXPECT_THAT(
m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, .93, 0.9, 0.8, 0.3, 0.2}, 1e-4)));
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({3.0}, 1e-4)));
}
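// Like the base model, but also exposes the use_regular_nms option and the
// interpreter thread count.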
class DetectionPostprocessOpModelwithRegularNMS : public SingleOpModel {
public:
DetectionPostprocessOpModelwithRegularNMS(
const TensorData& input1, const TensorData& input2,
const TensorData& input3, const TensorData& output1,
const TensorData& output2, const TensorData& output3,
const TensorData& output4, bool use_regular_nms, int num_threads = 1) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
input3_ = AddInput(input3);
output1_ = AddOutput(output1);
output2_ = AddOutput(output2);
output3_ = AddOutput(output3);
output4_ = AddOutput(output4);
flexbuffers::Builder fbb;
fbb.Map([&]() {
fbb.Int("max_detections", 3);
fbb.Int("max_classes_per_detection", 1);
fbb.Int("detections_per_class", 1);
fbb.Bool("use_regular_nms", use_regular_nms);
fbb.Float("nms_score_threshold", 0.0);
fbb.Float("nms_iou_threshold", 0.5);
fbb.Int("num_classes", 2);
fbb.Float("y_scale", 10.0);
fbb.Float("x_scale", 10.0);
fbb.Float("h_scale", 5.0);
fbb.Float("w_scale", 5.0);
});
fbb.Finish();
SetCustomOp("TFLite_Detection_PostProcess", fbb.GetBuffer(),
Register_DETECTION_POSTPROCESS);
BuildInterpreter({GetShape(input1_), GetShape(input2_), GetShape(input3_)},
num_threads,
false,
true);
}
int input1() { return input1_; }
int input2() { return input2_; }
int input3() { return input3_; }
template <class T>
void SetInput1(std::initializer_list<T> data) {
PopulateTensor<T>(input1_, data);
}
template <class T>
void SetInput2(std::initializer_list<T> data) {
PopulateTensor<T>(input2_, data);
}
template <class T>
void SetInput3(std::initializer_list<T> data) {
PopulateTensor<T>(input3_, data);
}
template <class T>
std::vector<T> GetOutput1() {
return ExtractVector<T>(output1_);
}
template <class T>
std::vector<T> GetOutput2() {
return ExtractVector<T>(output2_);
}
template <class T>
std::vector<T> GetOutput3() {
return ExtractVector<T>(output3_);
}
template <class T>
std::vector<T> GetOutput4() {
return ExtractVector<T>(output4_);
}
std::vector<int> GetOutputShape1() { return GetTensorShape(output1_); }
std::vector<int> GetOutputShape2() { return GetTensorShape(output2_); }
std::vector<int> GetOutputShape3() { return GetTensorShape(output3_); }
std::vector<int> GetOutputShape4() { return GetTensorShape(output4_); }
protected:
int input1_;
int input2_;
int input3_;
int output1_;
int output2_;
int output3_;
int output4_;
};
TEST(DetectionPostprocessOpTest, FloatTestFastNMS) {
DetectionPostprocessOpModelwithRegularNMS m(
{TensorType_FLOAT32, {1, 6, 4}}, {TensorType_FLOAT32, {1, 6, 3}},
{TensorType_FLOAT32, {6, 4}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, false);
m.SetInput1<float>({
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0
});
m.SetInput2<float>({0., .9, .8, 0., .75, .72, 0., .6, .5, 0., .93, .95, 0.,
.5, .4, 0., .3, .2});
m.SetInput3<float>({
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 100.5, 1.0, 1.0
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 1.0, 1.0, 0.0, 100.0, 1.0, 101.0},
1e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0}, 1e-4)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, 0.9, 0.3}, 1e-4)));
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({3.0}, 1e-4)));
}
TEST(DetectionPostprocessOpTest, QuantizedTestFastNMS) {
DetectionPostprocessOpModelwithRegularNMS m(
{TensorType_UINT8, {1, 6, 4}, -1.0, 1.0},
{TensorType_UINT8, {1, 6, 3}, 0.0, 1.0},
{TensorType_UINT8, {6, 4}, 0.0, 100.5}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, false);
std::vector<std::vector<float>> inputs1 = {{
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0
}};
m.QuantizeAndPopulate<uint8_t>(m.input1(), inputs1[0]);
std::vector<std::vector<float>> inputs2 = {{0., .9, .8, 0., .75, .72, 0., .6,
.5, 0., .93, .95, 0., .5, .4, 0.,
.3, .2}};
m.QuantizeAndPopulate<uint8_t>(m.input2(), inputs2[0]);
std::vector<std::vector<float>> inputs3 = {{
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 100.5, 1.0, 1.0
}};
m.QuantizeAndPopulate<uint8_t>(m.input3(), inputs3[0]);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 1.0, 1.0, 0.0, 100.0, 1.0, 101.0},
3e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0}, 1e-1)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, 0.9, 0.3}, 1e-1)));
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({3.0}, 1e-1)));
}
class DetectionPostprocessOpRegularTest
: public ::testing::TestWithParam<::testing::tuple<TensorType, int>> {
protected:
DetectionPostprocessOpRegularTest()
: tensor_type_(::testing::get<0>(GetParam())),
num_threads_(::testing::get<1>(GetParam())) {}
TensorType tensor_type_;
int num_threads_;
};
INSTANTIATE_TEST_SUITE_P(
DetectionPostprocessOpRegularTest, DetectionPostprocessOpRegularTest,
::testing::Combine(::testing::Values(TensorType_FLOAT32, TensorType_UINT8),
::testing::Values(1, 2)));
TEST_P(DetectionPostprocessOpRegularTest, RegularNMS) {
TensorData input1, input2, input3;
if (tensor_type_ == TensorType_UINT8) {
input1 = {tensor_type_, {1, 6, 4}, -1.0, 1.0};
input2 = {tensor_type_, {1, 6, 3}, 0.0, 1.0};
input3 = {tensor_type_, {6, 4}, 0.0, 100.5};
} else {
input1 = {tensor_type_, {1, 6, 4}};
input2 = {tensor_type_, {1, 6, 3}};
input3 = {tensor_type_, {6, 4}};
}
DetectionPostprocessOpModelwithRegularNMS m(
input1, input2, input3, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, true, num_threads_);
auto inputs1 = {
0.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, -1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 0.0f
};
if (tensor_type_ == TensorType_UINT8) {
m.QuantizeAndPopulate<uint8_t>(m.input1(), std::vector<float>{inputs1});
} else {
m.SetInput1<float>(inputs1);
}
auto inputs2 = {0.f, .9f, .8f, 0.f, .75f, .72f, 0.f, .6f, .5f,
0.f, .93f, .95f, 0.f, .5f, .4f, 0.f, .3f, .2f};
if (tensor_type_ == TensorType_UINT8) {
m.QuantizeAndPopulate<uint8_t>(m.input2(), std::vector<float>{inputs2});
} else {
m.SetInput2<float>(inputs2);
}
auto inputs3 = {
0.5f, 0.5f, 1.0f, 1.0f,
0.5f, 0.5f, 1.0f, 1.0f,
0.5f, 0.5f, 1.0f, 1.0f,
0.5f, 10.5f, 1.0f, 1.0f,
0.5f, 10.5f, 1.0f, 1.0f,
0.5f, 100.5f, 1.0f, 1.0f
};
if (tensor_type_ == TensorType_UINT8) {
m.QuantizeAndPopulate<uint8_t>(m.input3(), std::vector<float>{inputs3});
} else {
m.SetInput3<float>(inputs3);
}
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
if (tensor_type_ == TensorType_UINT8) {
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 0.0, 0.0},
3e-1)));
} else {
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 0.0, 0.0},
3e-4)));
}
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
if (tensor_type_ == TensorType_UINT8) {
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0}, 1e-1)));
} else {
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0}, 1e-4)));
}
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
if (tensor_type_ == TensorType_UINT8) {
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, 0.9, 0.0}, 1e-1)));
} else {
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, 0.93, 0.0}, 1e-4)));
}
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
if (tensor_type_ == TensorType_UINT8) {
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({2.0}, 1e-1)));
} else {
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({2.0}, 1e-4)));
}
}
TEST(DetectionPostprocessOpTest, FloatTestwithNoBackgroundClassAndNoKeypoints) {
DetectionPostprocessOpModelwithRegularNMS m(
{TensorType_FLOAT32, {1, 6, 4}}, {TensorType_FLOAT32, {1, 6, 2}},
{TensorType_FLOAT32, {6, 4}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, false);
m.SetInput1<float>({
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0
});
m.SetInput2<float>({.9, .8, .75, .72, .6, .5, .93, .95, .5, .4, .3, .2});
m.SetInput3<float>({
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 100.5, 1.0, 1.0
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 1.0, 1.0, 0.0, 100.0, 1.0, 101.0},
1e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0}, 1e-1)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, 0.9, 0.3}, 1e-1)));
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({3.0}, 1e-1)));
}
TEST(DetectionPostprocessOpTest, FloatTestwithBackgroundClassAndKeypoints) {
DetectionPostprocessOpModelwithRegularNMS m(
{TensorType_FLOAT32, {1, 6, 5}}, {TensorType_FLOAT32, {1, 6, 3}},
{TensorType_FLOAT32, {6, 4}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, false);
m.SetInput1<float>({
0.0, 0.0, 0.0, 0.0, 1.0,
0.0, 1.0, 0.0, 0.0, 1.0,
0.0, -1.0, 0.0, 0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 1.0,
0.0, 1.0, 0.0, 0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 1.0,
});
m.SetInput2<float>({0., .9, .8, 0., .75, .72, 0., .6, .5, 0., .93, .95, 0.,
.5, .4, 0., .3, .2});
m.SetInput3<float>({
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 100.5, 1.0, 1.0
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 1.0, 1.0, 0.0, 100.0, 1.0, 101.0},
1e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0}, 1e-4)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, 0.9, 0.3}, 1e-4)));
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({3.0}, 1e-4)));
}
TEST(DetectionPostprocessOpTest,
QuantizedTestwithNoBackgroundClassAndKeypoints) {
DetectionPostprocessOpModelwithRegularNMS m(
{TensorType_UINT8, {1, 6, 5}, -1.0, 1.0},
{TensorType_UINT8, {1, 6, 2}, 0.0, 1.0},
{TensorType_UINT8, {6, 4}, 0.0, 100.5}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, false);
std::vector<std::vector<float>> inputs1 = {{
0.0, 0.0, 0.0, 0.0, 1.0,
0.0, 1.0, 0.0, 0.0, 1.0,
0.0, -1.0, 0.0, 0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 1.0,
0.0, 1.0, 0.0, 0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 1.0
}};
m.QuantizeAndPopulate<uint8_t>(m.input1(), inputs1[0]);
std::vector<std::vector<float>> inputs2 = {
{.9, .8, .75, .72, .6, .5, .93, .95, .5, .4, .3, .2}};
m.QuantizeAndPopulate<uint8_t>(m.input2(), inputs2[0]);
std::vector<std::vector<float>> inputs3 = {{
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 100.5, 1.0, 1.0
}};
m.QuantizeAndPopulate<uint8_t>(m.input3(), inputs3[0]);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 1.0, 1.0, 0.0, 100.0, 1.0, 101.0},
3e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0}, 1e-1)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, 0.9, 0.3}, 1e-1)));
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({3.0}, 1e-1)));
}
TEST(DetectionPostprocessOpTest, FloatTestwithNoBackgroundClassAndKeypoints) {
DetectionPostprocessOpModelwithRegularNMS m(
{TensorType_FLOAT32, {1, 6, 5}}, {TensorType_FLOAT32, {1, 6, 2}},
{TensorType_FLOAT32, {6, 4}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, false);
m.SetInput1<float>({
0.0, 0.0, 0.0, 0.0, 1.0,
0.0, 1.0, 0.0, 0.0, 1.0,
0.0, -1.0, 0.0, 0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 1.0,
0.0, 1.0, 0.0, 0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 1.0,
});
m.SetInput2<float>({.9, .8, .75, .72, .6, .5, .93, .95, .5, .4, .3, .2});
m.SetInput3<float>({
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 100.5, 1.0, 1.0
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 1.0, 1.0, 0.0, 100.0, 1.0, 101.0},
1e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({1, 0, 0}, 1e-4)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(ArrayFloatNear({0.95, 0.9, 0.3}, 1e-4)));
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({3.0}, 1e-4)));
}
TEST(DetectionPostprocessOpTest,
QuantizedTestwithNoBackgroundClassAndKeypointsStableSort) {
DetectionPostprocessOpModelwithRegularNMS m(
{TensorType_UINT8, {1, 6, 5}, -1.0, 1.0},
{TensorType_UINT8, {1, 6, 2}, 0.0, 1.0},
{TensorType_UINT8, {6, 4}, 0.0, 100.5}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, false);
std::vector<std::vector<float>> inputs1 = {{
0.0, 0.0, 0.0, 0.0, 1.0,
0.0, 1.0, 0.0, 0.0, 1.0,
0.0, -1.0, 0.0, 0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 1.0,
0.0, 1.0, 0.0, 0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 1.0
}};
m.QuantizeAndPopulate<uint8_t>(m.input1(), inputs1[0]);
std::vector<std::vector<float>> inputs2 = {
{0.015625, 0.007812, 0.003906, 0.015625, 0.015625, 0.007812, 0.019531,
0.019531, 0.007812, 0.003906, 0.003906, 0.003906}};
m.QuantizeAndPopulate<uint8_t>(m.input2(), inputs2[0]);
std::vector<std::vector<float>> inputs3 = {{
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 10.5, 1.0, 1.0,
0.5, 100.5, 1.0, 1.0
}};
m.QuantizeAndPopulate<uint8_t>(m.input3(), inputs3[0]);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<int> output_shape1 = m.GetOutputShape1();
EXPECT_THAT(output_shape1, ElementsAre(1, 3, 4));
EXPECT_THAT(
m.GetOutput1<float>(),
ElementsAreArray(ArrayFloatNear(
{0.0, 10.0, 1.0, 11.0, 0.0, 0.0, 1.0, 1.0, 0.0, 100.0, 1.0, 101.0},
3e-1)));
std::vector<int> output_shape2 = m.GetOutputShape2();
EXPECT_THAT(output_shape2, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput2<float>(),
ElementsAreArray(ArrayFloatNear({0, 0, 0}, 1e-1)));
std::vector<int> output_shape3 = m.GetOutputShape3();
EXPECT_THAT(output_shape3, ElementsAre(1, 3));
EXPECT_THAT(m.GetOutput3<float>(),
ElementsAreArray(
ArrayFloatNear({0.0196078, 0.0156863, 0.00392157}, 1e-7)));
std::vector<int> output_shape4 = m.GetOutputShape4();
EXPECT_THAT(output_shape4, ElementsAre(1));
EXPECT_THAT(m.GetOutput4<float>(),
ElementsAreArray(ArrayFloatNear({3.0}, 1e-1)));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/detection_postprocess.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/detection_postprocess_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0bee8605-291b-4815-9e79-77612b2128b2 | cpp | tensorflow/tensorflow | type_to_tflitetype | tensorflow/lite/type_to_tflitetype.h | tensorflow/lite/type_to_tflitetype_test.cc | #ifndef TENSORFLOW_LITE_TYPE_TO_TFLITETYPE_H_
#define TENSORFLOW_LITE_TYPE_TO_TFLITETYPE_H_
#include <complex>
#include <string>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/portable_type_to_tflitetype.h"
namespace tflite {
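// Extends the portable type mappings with STL-backed types.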
MATCH_TYPE_AND_TFLITE_TYPE(std::string, kTfLiteString);
MATCH_TYPE_AND_TFLITE_TYPE(std::complex<float>, kTfLiteComplex64);
MATCH_TYPE_AND_TFLITE_TYPE(std::complex<double>, kTfLiteComplex128);
}
#endif | #include "tensorflow/lite/type_to_tflitetype.h"
#include <string>
#include <type_traits>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/portable_type_to_tflitetype.h"
namespace tflite {
namespace {
TEST(TypeToTfLiteType, TypeMapsAreInverseOfEachOther) {
EXPECT_EQ(kTfLiteInt16,
typeToTfLiteType<TfLiteTypeToType<kTfLiteInt16>::Type>());
EXPECT_EQ(kTfLiteUInt16,
typeToTfLiteType<TfLiteTypeToType<kTfLiteUInt16>::Type>());
EXPECT_EQ(kTfLiteInt32,
typeToTfLiteType<TfLiteTypeToType<kTfLiteInt32>::Type>());
EXPECT_EQ(kTfLiteUInt32,
typeToTfLiteType<TfLiteTypeToType<kTfLiteUInt32>::Type>());
EXPECT_EQ(kTfLiteFloat32,
typeToTfLiteType<TfLiteTypeToType<kTfLiteFloat32>::Type>());
EXPECT_EQ(kTfLiteUInt8,
typeToTfLiteType<TfLiteTypeToType<kTfLiteUInt8>::Type>());
EXPECT_EQ(kTfLiteInt8,
typeToTfLiteType<TfLiteTypeToType<kTfLiteInt8>::Type>());
EXPECT_EQ(kTfLiteBool,
typeToTfLiteType<TfLiteTypeToType<kTfLiteBool>::Type>());
EXPECT_EQ(kTfLiteComplex64,
typeToTfLiteType<TfLiteTypeToType<kTfLiteComplex64>::Type>());
EXPECT_EQ(kTfLiteComplex128,
typeToTfLiteType<TfLiteTypeToType<kTfLiteComplex128>::Type>());
EXPECT_EQ(kTfLiteString,
typeToTfLiteType<TfLiteTypeToType<kTfLiteString>::Type>());
EXPECT_EQ(kTfLiteFloat16,
typeToTfLiteType<TfLiteTypeToType<kTfLiteFloat16>::Type>());
EXPECT_EQ(kTfLiteFloat64,
typeToTfLiteType<TfLiteTypeToType<kTfLiteFloat64>::Type>());
}
TEST(TypeToTfLiteType, Sanity) {
EXPECT_EQ(kTfLiteFloat32, typeToTfLiteType<float>());
EXPECT_EQ(kTfLiteBool, typeToTfLiteType<bool>());
EXPECT_EQ(kTfLiteString, typeToTfLiteType<std::string>());
static_assert(
std::is_same<float, TfLiteTypeToType<kTfLiteFloat32>::Type>::value,
"TfLiteTypeToType test failure");
static_assert(std::is_same<bool, TfLiteTypeToType<kTfLiteBool>::Type>::value,
"TfLiteTypeToType test failure");
static_assert(
std::is_same<std::string, TfLiteTypeToType<kTfLiteString>::Type>::value,
"TfLiteTypeToType test failure");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/type_to_tflitetype.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/type_to_tflitetype_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2ab755d8-89d5-4e68-8d5d-83ff2fadfc9a | cpp | google/quiche | quic_interval | quiche/quic/core/quic_interval.h | quiche/quic/core/quic_interval_test.cc | #ifndef QUICHE_QUIC_CORE_QUIC_INTERVAL_H_
#define QUICHE_QUIC_CORE_QUIC_INTERVAL_H_
#include <stddef.h>
#include <algorithm>
#include <ostream>
#include <type_traits>
#include <utility>
#include <vector>
#include "quiche/quic/platform/api/quic_export.h"
namespace quic {
template <typename T>
class QUICHE_NO_EXPORT QuicInterval {
private:
template <typename U>
class QUICHE_NO_EXPORT DiffTypeOrVoid {
private:
template <typename V>
static auto f(const V* v) -> decltype(*v - *v);
template <typename V>
static void f(...);
public:
using type = typename std::decay<decltype(f<U>(nullptr))>::type;
};
public:
QuicInterval() : min_(), max_() {}
QuicInterval(const T& min, const T& max) : min_(min), max_(max) {}
template <typename U1, typename U2,
typename = typename std::enable_if<
std::is_convertible<U1, T>::value &&
std::is_convertible<U2, T>::value>::type>
QuicInterval(U1&& min, U2&& max)
: min_(std::forward<U1>(min)), max_(std::forward<U2>(max)) {}
const T& min() const { return min_; }
const T& max() const { return max_; }
void SetMin(const T& t) { min_ = t; }
void SetMax(const T& t) { max_ = t; }
void Set(const T& min, const T& max) {
SetMin(min);
SetMax(max);
}
void Clear() { *this = {}; }
bool Empty() const { return min() >= max(); }
typename DiffTypeOrVoid<T>::type Length() const {
return (Empty() ? min() : max()) - min();
}
bool Contains(const T& t) const { return min() <= t && max() > t; }
bool Contains(const QuicInterval& i) const {
return !Empty() && !i.Empty() && min() <= i.min() && max() >= i.max();
}
bool Intersects(const QuicInterval& i) const {
return !Empty() && !i.Empty() && min() < i.max() && max() > i.min();
}
bool Intersects(const QuicInterval& i, QuicInterval* out) const;
bool IntersectWith(const QuicInterval& i);
bool Separated(const QuicInterval& other) const {
if (Empty() || other.Empty()) return true;
return other.max() < min() || max() < other.min();
}
bool SpanningUnion(const QuicInterval& i);
bool Difference(const QuicInterval& i,
std::vector<QuicInterval*>* difference) const;
bool Difference(const QuicInterval& i, QuicInterval* lo,
QuicInterval* hi) const;
friend bool operator==(const QuicInterval& a, const QuicInterval& b) {
bool ae = a.Empty();
bool be = b.Empty();
if (ae && be) return true;
if (ae != be) return false;
return a.min() == b.min() && a.max() == b.max();
}
friend bool operator!=(const QuicInterval& a, const QuicInterval& b) {
return !(a == b);
}
friend bool operator<(const QuicInterval& a, const QuicInterval& b) {
return a.min() < b.min() || (!(b.min() < a.min()) && b.max() < a.max());
}
private:
T min_;
T max_;
};
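// Editorial usage sketch (not part of the original header): QuicInterval models
// the half-open range [min, max); a minimal intersection example, consistent
// with the unit tests in quic_interval_test.cc.
//   QuicInterval<int64_t> a(100, 200);
//   QuicInterval<int64_t> b(150, 250);
//   QuicInterval<int64_t> overlap;
//   if (a.Intersects(b, &overlap)) {
//     // overlap == QuicInterval<int64_t>(150, 200)
//   }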
template <typename T>
QuicInterval<T> MakeQuicInterval(T&& lhs, T&& rhs) {
return QuicInterval<T>(std::forward<T>(lhs), std::forward<T>(rhs));
}
template <typename T>
auto operator<<(std::ostream& out, const QuicInterval<T>& i)
-> decltype(out << i.min()) {
return out << "[" << i.min() << ", " << i.max() << ")";
}
template <typename T>
bool QuicInterval<T>::Intersects(const QuicInterval& i,
QuicInterval* out) const {
if (!Intersects(i)) return false;
if (out != nullptr) {
*out = QuicInterval(std::max(min(), i.min()), std::min(max(), i.max()));
}
return true;
}
template <typename T>
bool QuicInterval<T>::IntersectWith(const QuicInterval& i) {
if (Empty()) return false;
bool modified = false;
if (i.min() > min()) {
SetMin(i.min());
modified = true;
}
if (i.max() < max()) {
SetMax(i.max());
modified = true;
}
return modified;
}
template <typename T>
bool QuicInterval<T>::SpanningUnion(const QuicInterval& i) {
if (i.Empty()) return false;
if (Empty()) {
*this = i;
return true;
}
bool modified = false;
if (i.min() < min()) {
SetMin(i.min());
modified = true;
}
if (i.max() > max()) {
SetMax(i.max());
modified = true;
}
return modified;
}
template <typename T>
bool QuicInterval<T>::Difference(const QuicInterval& i,
std::vector<QuicInterval*>* difference) const {
if (Empty()) {
return false;
}
if (i.Empty()) {
difference->push_back(new QuicInterval(*this));
return false;
}
if (min() < i.max() && min() >= i.min() && max() > i.max()) {
difference->push_back(new QuicInterval(i.max(), max()));
return true;
}
if (max() > i.min() && max() <= i.max() && min() < i.min()) {
difference->push_back(new QuicInterval(min(), i.min()));
return true;
}
if (min() < i.min() && max() > i.max()) {
difference->push_back(new QuicInterval(min(), i.min()));
difference->push_back(new QuicInterval(i.max(), max()));
return true;
}
if (min() >= i.min() && max() <= i.max()) {
return true;
}
difference->push_back(new QuicInterval(*this));
return false;
}
template <typename T>
bool QuicInterval<T>::Difference(const QuicInterval& i, QuicInterval* lo,
QuicInterval* hi) const {
*lo = {};
*hi = {};
if (Empty()) return false;
if (i.Empty()) {
*lo = *this;
return false;
}
if (min() < i.max() && min() >= i.min() && max() > i.max()) {
*hi = QuicInterval(i.max(), max());
return true;
}
if (max() > i.min() && max() <= i.max() && min() < i.min()) {
*lo = QuicInterval(min(), i.min());
return true;
}
if (min() < i.min() && max() > i.max()) {
*lo = QuicInterval(min(), i.min());
*hi = QuicInterval(i.max(), max());
return true;
}
if (min() >= i.min() && max() <= i.max()) {
return true;
}
*lo = *this;
return false;
}
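// Editorial example (not part of the original header), derived from the unit
// tests: for d = [100, 200) and i = [110, 180), d.Difference(i, &lo, &hi)
// returns true with lo = [100, 110) and hi = [180, 200); when a non-empty i
// does not intersect d, it returns false and lo is set to d.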
}
#endif | #include "quiche/quic/core/quic_interval.h"
#include <ostream>
#include <sstream>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
namespace {
template <typename ForwardIterator>
void STLDeleteContainerPointers(ForwardIterator begin, ForwardIterator end) {
while (begin != end) {
auto temp = begin;
++begin;
delete *temp;
}
}
template <typename T>
void STLDeleteElements(T* container) {
if (!container) return;
STLDeleteContainerPointers(container->begin(), container->end());
container->clear();
}
class ConstructorListener {
public:
ConstructorListener(int* copy_construct_counter, int* move_construct_counter)
: copy_construct_counter_(copy_construct_counter),
move_construct_counter_(move_construct_counter) {
*copy_construct_counter_ = 0;
*move_construct_counter_ = 0;
}
ConstructorListener(const ConstructorListener& other) {
copy_construct_counter_ = other.copy_construct_counter_;
move_construct_counter_ = other.move_construct_counter_;
++*copy_construct_counter_;
}
ConstructorListener(ConstructorListener&& other) {
copy_construct_counter_ = other.copy_construct_counter_;
move_construct_counter_ = other.move_construct_counter_;
++*move_construct_counter_;
}
bool operator<(const ConstructorListener&) { return false; }
bool operator>(const ConstructorListener&) { return false; }
bool operator<=(const ConstructorListener&) { return true; }
bool operator>=(const ConstructorListener&) { return true; }
bool operator==(const ConstructorListener&) { return true; }
private:
int* copy_construct_counter_;
int* move_construct_counter_;
};
TEST(QuicIntervalConstructorTest, Move) {
int object1_copy_count, object1_move_count;
ConstructorListener object1(&object1_copy_count, &object1_move_count);
int object2_copy_count, object2_move_count;
ConstructorListener object2(&object2_copy_count, &object2_move_count);
QuicInterval<ConstructorListener> interval(object1, std::move(object2));
EXPECT_EQ(1, object1_copy_count);
EXPECT_EQ(0, object1_move_count);
EXPECT_EQ(0, object2_copy_count);
EXPECT_EQ(1, object2_move_count);
}
TEST(QuicIntervalConstructorTest, ImplicitConversion) {
struct WrappedInt {
WrappedInt(int value) : value(value) {}
bool operator<(const WrappedInt& other) { return value < other.value; }
bool operator>(const WrappedInt& other) { return value > other.value; }
bool operator<=(const WrappedInt& other) { return value <= other.value; }
bool operator>=(const WrappedInt& other) { return value >= other.value; }
bool operator==(const WrappedInt& other) { return value == other.value; }
int value;
};
static_assert(std::is_convertible<int, WrappedInt>::value, "");
static_assert(
std::is_constructible<QuicInterval<WrappedInt>, int, int>::value, "");
QuicInterval<WrappedInt> i(10, 20);
EXPECT_EQ(10, i.min().value);
EXPECT_EQ(20, i.max().value);
}
class QuicIntervalTest : public QuicTest {
protected:
void TestIntersect(const QuicInterval<int64_t>& i1,
const QuicInterval<int64_t>& i2, bool changes_i1,
bool changes_i2, const QuicInterval<int64_t>& result) {
QuicInterval<int64_t> i;
i = i1;
EXPECT_TRUE(i.IntersectWith(i2) == changes_i1 && i == result);
i = i2;
EXPECT_TRUE(i.IntersectWith(i1) == changes_i2 && i == result);
}
};
TEST_F(QuicIntervalTest, ConstructorsCopyAndClear) {
QuicInterval<int32_t> empty;
EXPECT_TRUE(empty.Empty());
QuicInterval<int32_t> d2(0, 100);
EXPECT_EQ(0, d2.min());
EXPECT_EQ(100, d2.max());
EXPECT_EQ(QuicInterval<int32_t>(0, 100), d2);
EXPECT_NE(QuicInterval<int32_t>(0, 99), d2);
empty = d2;
EXPECT_EQ(0, d2.min());
EXPECT_EQ(100, d2.max());
EXPECT_TRUE(empty == d2);
EXPECT_EQ(empty, d2);
EXPECT_TRUE(d2 == empty);
EXPECT_EQ(d2, empty);
QuicInterval<int32_t> max_less_than_min(40, 20);
EXPECT_TRUE(max_less_than_min.Empty());
EXPECT_EQ(40, max_less_than_min.min());
EXPECT_EQ(20, max_less_than_min.max());
QuicInterval<int> d3(10, 20);
d3.Clear();
EXPECT_TRUE(d3.Empty());
}
TEST_F(QuicIntervalTest, MakeQuicInterval) {
static_assert(
std::is_same<QuicInterval<int>, decltype(MakeQuicInterval(0, 3))>::value,
"Type is deduced incorrectly.");
static_assert(std::is_same<QuicInterval<double>,
decltype(MakeQuicInterval(0., 3.))>::value,
"Type is deduced incorrectly.");
EXPECT_EQ(MakeQuicInterval(0., 3.), QuicInterval<double>(0, 3));
}
TEST_F(QuicIntervalTest, GettersSetters) {
QuicInterval<int32_t> d1(100, 200);
d1.SetMin(30);
EXPECT_EQ(30, d1.min());
EXPECT_EQ(200, d1.max());
d1.SetMax(220);
EXPECT_EQ(30, d1.min());
EXPECT_EQ(220, d1.max());
d1.Clear();
d1.Set(30, 220);
EXPECT_EQ(30, d1.min());
EXPECT_EQ(220, d1.max());
QuicInterval<int32_t> d2;
EXPECT_TRUE(!d1.SpanningUnion(d2));
EXPECT_EQ(30, d1.min());
EXPECT_EQ(220, d1.max());
EXPECT_TRUE(d2.SpanningUnion(d1));
EXPECT_EQ(30, d2.min());
EXPECT_EQ(220, d2.max());
d2.SetMin(40);
d2.SetMax(100);
EXPECT_TRUE(!d1.SpanningUnion(d2));
EXPECT_EQ(30, d1.min());
EXPECT_EQ(220, d1.max());
d2.SetMin(20);
d2.SetMax(100);
EXPECT_TRUE(d1.SpanningUnion(d2));
EXPECT_EQ(20, d1.min());
EXPECT_EQ(220, d1.max());
d2.SetMin(50);
d2.SetMax(300);
EXPECT_TRUE(d1.SpanningUnion(d2));
EXPECT_EQ(20, d1.min());
EXPECT_EQ(300, d1.max());
d2.SetMin(0);
d2.SetMax(500);
EXPECT_TRUE(d1.SpanningUnion(d2));
EXPECT_EQ(0, d1.min());
EXPECT_EQ(500, d1.max());
d2.SetMin(100);
d2.SetMax(0);
EXPECT_TRUE(!d1.SpanningUnion(d2));
EXPECT_EQ(0, d1.min());
EXPECT_EQ(500, d1.max());
EXPECT_TRUE(d2.SpanningUnion(d1));
EXPECT_EQ(0, d2.min());
EXPECT_EQ(500, d2.max());
}
TEST_F(QuicIntervalTest, CoveringOps) {
const QuicInterval<int64_t> empty;
const QuicInterval<int64_t> d(100, 200);
const QuicInterval<int64_t> d1(0, 50);
const QuicInterval<int64_t> d2(50, 110);
const QuicInterval<int64_t> d3(110, 180);
const QuicInterval<int64_t> d4(180, 220);
const QuicInterval<int64_t> d5(220, 300);
const QuicInterval<int64_t> d6(100, 150);
const QuicInterval<int64_t> d7(150, 200);
const QuicInterval<int64_t> d8(0, 300);
EXPECT_TRUE(d.Intersects(d));
EXPECT_TRUE(!empty.Intersects(d) && !d.Intersects(empty));
EXPECT_TRUE(!d.Intersects(d1) && !d1.Intersects(d));
EXPECT_TRUE(d.Intersects(d2) && d2.Intersects(d));
EXPECT_TRUE(d.Intersects(d3) && d3.Intersects(d));
EXPECT_TRUE(d.Intersects(d4) && d4.Intersects(d));
EXPECT_TRUE(!d.Intersects(d5) && !d5.Intersects(d));
EXPECT_TRUE(d.Intersects(d6) && d6.Intersects(d));
EXPECT_TRUE(d.Intersects(d7) && d7.Intersects(d));
EXPECT_TRUE(d.Intersects(d8) && d8.Intersects(d));
QuicInterval<int64_t> i;
EXPECT_TRUE(d.Intersects(d, &i) && d == i);
EXPECT_TRUE(!empty.Intersects(d, nullptr) && !d.Intersects(empty, nullptr));
EXPECT_TRUE(!d.Intersects(d1, nullptr) && !d1.Intersects(d, nullptr));
EXPECT_TRUE(d.Intersects(d2, &i) && i == QuicInterval<int64_t>(100, 110));
EXPECT_TRUE(d2.Intersects(d, &i) && i == QuicInterval<int64_t>(100, 110));
EXPECT_TRUE(d.Intersects(d3, &i) && i == d3);
EXPECT_TRUE(d3.Intersects(d, &i) && i == d3);
EXPECT_TRUE(d.Intersects(d4, &i) && i == QuicInterval<int64_t>(180, 200));
EXPECT_TRUE(d4.Intersects(d, &i) && i == QuicInterval<int64_t>(180, 200));
EXPECT_TRUE(!d.Intersects(d5, nullptr) && !d5.Intersects(d, nullptr));
EXPECT_TRUE(d.Intersects(d6, &i) && i == d6);
EXPECT_TRUE(d6.Intersects(d, &i) && i == d6);
EXPECT_TRUE(d.Intersects(d7, &i) && i == d7);
EXPECT_TRUE(d7.Intersects(d, &i) && i == d7);
EXPECT_TRUE(d.Intersects(d8, &i) && i == d);
EXPECT_TRUE(d8.Intersects(d, &i) && i == d);
TestIntersect(empty, d, false, true, empty);
TestIntersect(d, d1, true, true, empty);
TestIntersect(d1, d2, true, true, empty);
TestIntersect(d, d2, true, true, QuicInterval<int64_t>(100, 110));
TestIntersect(d8, d, true, false, d);
TestIntersect(d8, d1, true, false, d1);
TestIntersect(d8, d5, true, false, d5);
EXPECT_TRUE(!empty.Contains(d) && !d.Contains(empty));
EXPECT_TRUE(d.Contains(d));
EXPECT_TRUE(!d.Contains(d1) && !d1.Contains(d));
EXPECT_TRUE(!d.Contains(d2) && !d2.Contains(d));
EXPECT_TRUE(d.Contains(d3) && !d3.Contains(d));
EXPECT_TRUE(!d.Contains(d4) && !d4.Contains(d));
EXPECT_TRUE(!d.Contains(d5) && !d5.Contains(d));
EXPECT_TRUE(d.Contains(d6) && !d6.Contains(d));
EXPECT_TRUE(d.Contains(d7) && !d7.Contains(d));
EXPECT_TRUE(!d.Contains(d8) && d8.Contains(d));
EXPECT_TRUE(d.Contains(100));
EXPECT_TRUE(!d.Contains(200));
EXPECT_TRUE(d.Contains(150));
EXPECT_TRUE(!d.Contains(99));
EXPECT_TRUE(!d.Contains(201));
std::vector<QuicInterval<int64_t>*> diff;
EXPECT_TRUE(!d.Difference(empty, &diff));
EXPECT_EQ(1u, diff.size());
EXPECT_EQ(100, diff[0]->min());
EXPECT_EQ(200, diff[0]->max());
STLDeleteElements(&diff);
EXPECT_TRUE(!empty.Difference(d, &diff) && diff.empty());
EXPECT_TRUE(d.Difference(d, &diff) && diff.empty());
EXPECT_TRUE(!d.Difference(d1, &diff));
EXPECT_EQ(1u, diff.size());
EXPECT_EQ(100, diff[0]->min());
EXPECT_EQ(200, diff[0]->max());
STLDeleteElements(&diff);
QuicInterval<int64_t> lo;
QuicInterval<int64_t> hi;
EXPECT_TRUE(d.Difference(d2, &lo, &hi));
EXPECT_TRUE(lo.Empty());
EXPECT_EQ(110, hi.min());
EXPECT_EQ(200, hi.max());
EXPECT_TRUE(d.Difference(d2, &diff));
EXPECT_EQ(1u, diff.size());
EXPECT_EQ(110, diff[0]->min());
EXPECT_EQ(200, diff[0]->max());
STLDeleteElements(&diff);
EXPECT_TRUE(d.Difference(d3, &lo, &hi));
EXPECT_EQ(100, lo.min());
EXPECT_EQ(110, lo.max());
EXPECT_EQ(180, hi.min());
EXPECT_EQ(200, hi.max());
EXPECT_TRUE(d.Difference(d3, &diff));
EXPECT_EQ(2u, diff.size());
EXPECT_EQ(100, diff[0]->min());
EXPECT_EQ(110, diff[0]->max());
EXPECT_EQ(180, diff[1]->min());
EXPECT_EQ(200, diff[1]->max());
STLDeleteElements(&diff);
EXPECT_TRUE(d.Difference(d4, &lo, &hi));
EXPECT_EQ(100, lo.min());
EXPECT_EQ(180, lo.max());
EXPECT_TRUE(hi.Empty());
EXPECT_TRUE(d.Difference(d4, &diff));
EXPECT_EQ(1u, diff.size());
EXPECT_EQ(100, diff[0]->min());
EXPECT_EQ(180, diff[0]->max());
STLDeleteElements(&diff);
EXPECT_FALSE(d.Difference(d5, &lo, &hi));
EXPECT_EQ(100, lo.min());
EXPECT_EQ(200, lo.max());
EXPECT_TRUE(hi.Empty());
EXPECT_FALSE(d.Difference(d5, &diff));
EXPECT_EQ(1u, diff.size());
EXPECT_EQ(100, diff[0]->min());
EXPECT_EQ(200, diff[0]->max());
STLDeleteElements(&diff);
EXPECT_TRUE(d.Difference(d6, &lo, &hi));
EXPECT_TRUE(lo.Empty());
EXPECT_EQ(150, hi.min());
EXPECT_EQ(200, hi.max());
EXPECT_TRUE(d.Difference(d6, &diff));
EXPECT_EQ(1u, diff.size());
EXPECT_EQ(150, diff[0]->min());
EXPECT_EQ(200, diff[0]->max());
STLDeleteElements(&diff);
EXPECT_TRUE(d.Difference(d7, &lo, &hi));
EXPECT_EQ(100, lo.min());
EXPECT_EQ(150, lo.max());
EXPECT_TRUE(hi.Empty());
EXPECT_TRUE(d.Difference(d7, &diff));
EXPECT_EQ(1u, diff.size());
EXPECT_EQ(100, diff[0]->min());
EXPECT_EQ(150, diff[0]->max());
STLDeleteElements(&diff);
EXPECT_TRUE(d.Difference(d8, &lo, &hi));
EXPECT_TRUE(lo.Empty());
EXPECT_TRUE(hi.Empty());
EXPECT_TRUE(d.Difference(d8, &diff) && diff.empty());
}
TEST_F(QuicIntervalTest, Separated) {
using QI = QuicInterval<int>;
EXPECT_FALSE(QI(100, 200).Separated(QI(100, 200)));
EXPECT_FALSE(QI(100, 200).Separated(QI(200, 300)));
EXPECT_TRUE(QI(100, 200).Separated(QI(201, 300)));
EXPECT_FALSE(QI(100, 200).Separated(QI(0, 100)));
EXPECT_TRUE(QI(100, 200).Separated(QI(0, 99)));
EXPECT_FALSE(QI(100, 200).Separated(QI(150, 170)));
EXPECT_FALSE(QI(150, 170).Separated(QI(100, 200)));
EXPECT_FALSE(QI(100, 200).Separated(QI(150, 250)));
EXPECT_FALSE(QI(150, 250).Separated(QI(100, 200)));
}
TEST_F(QuicIntervalTest, Length) {
const QuicInterval<int> empty1;
const QuicInterval<int> empty2(1, 1);
const QuicInterval<int> empty3(1, 0);
const QuicInterval<QuicTime> empty4(
QuicTime::Zero() + QuicTime::Delta::FromSeconds(1), QuicTime::Zero());
const QuicInterval<int> d1(1, 2);
const QuicInterval<int> d2(0, 50);
const QuicInterval<QuicTime> d3(
QuicTime::Zero(), QuicTime::Zero() + QuicTime::Delta::FromSeconds(1));
const QuicInterval<QuicTime> d4(
QuicTime::Zero() + QuicTime::Delta::FromSeconds(3600),
QuicTime::Zero() + QuicTime::Delta::FromSeconds(5400));
EXPECT_EQ(0, empty1.Length());
EXPECT_EQ(0, empty2.Length());
EXPECT_EQ(0, empty3.Length());
EXPECT_EQ(QuicTime::Delta::Zero(), empty4.Length());
EXPECT_EQ(1, d1.Length());
EXPECT_EQ(50, d2.Length());
EXPECT_EQ(QuicTime::Delta::FromSeconds(1), d3.Length());
EXPECT_EQ(QuicTime::Delta::FromSeconds(1800), d4.Length());
}
TEST_F(QuicIntervalTest, IntervalOfTypeWithNoOperatorMinus) {
const QuicInterval<std::string> d1("a", "b");
const QuicInterval<std::pair<int, int>> d2({1, 2}, {4, 3});
EXPECT_EQ("a", d1.min());
EXPECT_EQ("b", d1.max());
EXPECT_EQ(std::make_pair(1, 2), d2.min());
EXPECT_EQ(std::make_pair(4, 3), d2.max());
}
struct NoEquals {
NoEquals(int v) : value(v) {}
int value;
bool operator<(const NoEquals& other) const { return value < other.value; }
};
TEST_F(QuicIntervalTest, OrderedComparisonForTypeWithoutEquals) {
const QuicInterval<NoEquals> d1(0, 4);
const QuicInterval<NoEquals> d2(0, 3);
const QuicInterval<NoEquals> d3(1, 4);
const QuicInterval<NoEquals> d4(1, 5);
const QuicInterval<NoEquals> d6(0, 4);
EXPECT_TRUE(d1 < d2);
EXPECT_TRUE(d1 < d3);
EXPECT_TRUE(d1 < d4);
EXPECT_FALSE(d1 < d6);
}
TEST_F(QuicIntervalTest, OutputReturnsOstreamRef) {
std::stringstream ss;
const QuicInterval<int> v(1, 2);
auto return_type_is_a_ref = [](std::ostream&) {};
return_type_is_a_ref(ss << v);
}
struct NotOstreamable {
bool operator<(const NotOstreamable&) const { return false; }
bool operator>=(const NotOstreamable&) const { return true; }
bool operator==(const NotOstreamable&) const { return true; }
};
TEST_F(QuicIntervalTest, IntervalOfTypeWithNoOstreamSupport) {
const NotOstreamable v;
const QuicInterval<NotOstreamable> d(v, v);
EXPECT_EQ(d, d);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_interval.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_interval_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
ce5dca60-fff5-4672-8eb0-97c15b454f73 | cpp | google/cel-cpp | regex_functions | extensions/regex_functions.cc | extensions/regex_functions_test.cc | #include "extensions/regex_functions.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "eval/public/cel_function.h"
#include "eval/public/cel_options.h"
#include "eval/public/cel_value.h"
#include "eval/public/containers/container_backed_map_impl.h"
#include "eval/public/portable_cel_function_adapter.h"
#include "re2/re2.h"
namespace cel::extensions {
namespace {
using ::google::api::expr::runtime::CelFunction;
using ::google::api::expr::runtime::CelFunctionRegistry;
using ::google::api::expr::runtime::CelValue;
using ::google::api::expr::runtime::CreateErrorValue;
using ::google::api::expr::runtime::InterpreterOptions;
using ::google::api::expr::runtime::PortableBinaryFunctionAdapter;
using ::google::api::expr::runtime::PortableFunctionAdapter;
using ::google::protobuf::Arena;
CelValue ExtractString(Arena* arena, CelValue::StringHolder target,
CelValue::StringHolder regex,
CelValue::StringHolder rewrite) {
RE2 re2(regex.value());
if (!re2.ok()) {
return CreateErrorValue(
arena, absl::InvalidArgumentError("Given Regex is Invalid"));
}
std::string output;
auto result = RE2::Extract(target.value(), re2, rewrite.value(), &output);
if (!result) {
return CreateErrorValue(
arena, absl::InvalidArgumentError(
"Unable to extract string for the given regex"));
}
return CelValue::CreateString(
google::protobuf::Arena::Create<std::string>(arena, output));
}
CelValue CaptureString(Arena* arena, CelValue::StringHolder target,
CelValue::StringHolder regex) {
RE2 re2(regex.value());
if (!re2.ok()) {
return CreateErrorValue(
arena, absl::InvalidArgumentError("Given Regex is Invalid"));
}
std::string output;
auto result = RE2::FullMatch(target.value(), re2, &output);
if (!result) {
return CreateErrorValue(
arena, absl::InvalidArgumentError(
"Unable to capture groups for the given regex"));
} else {
auto cel_value = CelValue::CreateString(
google::protobuf::Arena::Create<std::string>(arena, output));
return cel_value;
}
}
CelValue CaptureStringN(Arena* arena, CelValue::StringHolder target,
CelValue::StringHolder regex) {
RE2 re2(regex.value());
if (!re2.ok()) {
return CreateErrorValue(
arena, absl::InvalidArgumentError("Given Regex is Invalid"));
}
const int capturing_groups_count = re2.NumberOfCapturingGroups();
const auto& named_capturing_groups_map = re2.CapturingGroupNames();
if (capturing_groups_count <= 0) {
return CreateErrorValue(arena,
absl::InvalidArgumentError(
"Capturing groups were not found in the given "
"regex."));
}
std::vector<std::string> captured_strings(capturing_groups_count);
std::vector<RE2::Arg> captured_string_addresses(capturing_groups_count);
std::vector<RE2::Arg*> argv(capturing_groups_count);
for (int j = 0; j < capturing_groups_count; j++) {
captured_string_addresses[j] = &captured_strings[j];
argv[j] = &captured_string_addresses[j];
}
auto result =
RE2::FullMatchN(target.value(), re2, argv.data(), capturing_groups_count);
if (!result) {
return CreateErrorValue(
arena, absl::InvalidArgumentError(
"Unable to capture groups for the given regex"));
}
std::vector<std::pair<CelValue, CelValue>> cel_values;
for (int index = 1; index <= capturing_groups_count; index++) {
auto it = named_capturing_groups_map.find(index);
std::string name = it != named_capturing_groups_map.end()
? it->second
: std::to_string(index);
cel_values.emplace_back(
CelValue::CreateString(google::protobuf::Arena::Create<std::string>(arena, name)),
CelValue::CreateString(google::protobuf::Arena::Create<std::string>(
arena, captured_strings[index - 1])));
}
auto container_map = google::api::expr::runtime::CreateContainerBackedMap(
absl::MakeSpan(cel_values));
::google::api::expr::runtime::CelMap* cel_map = container_map->release();
arena->Own(cel_map);
return CelValue::CreateMap(cel_map);
}
absl::Status RegisterRegexFunctions(CelFunctionRegistry* registry) {
CEL_RETURN_IF_ERROR(
(PortableFunctionAdapter<CelValue, CelValue::StringHolder,
CelValue::StringHolder, CelValue::StringHolder>::
CreateAndRegister(
kRegexExtract, false,
[](Arena* arena, CelValue::StringHolder target,
CelValue::StringHolder regex,
CelValue::StringHolder rewrite) -> CelValue {
return ExtractString(arena, target, regex, rewrite);
},
registry)));
CEL_RETURN_IF_ERROR(registry->Register(
PortableBinaryFunctionAdapter<CelValue, CelValue::StringHolder,
CelValue::StringHolder>::
Create(kRegexCapture, false,
[](Arena* arena, CelValue::StringHolder target,
CelValue::StringHolder regex) -> CelValue {
return CaptureString(arena, target, regex);
})));
return registry->Register(
PortableBinaryFunctionAdapter<CelValue, CelValue::StringHolder,
CelValue::StringHolder>::
Create(kRegexCaptureN, false,
[](Arena* arena, CelValue::StringHolder target,
CelValue::StringHolder regex) -> CelValue {
return CaptureStringN(arena, target, regex);
}));
}
}
absl::Status RegisterRegexFunctions(CelFunctionRegistry* registry,
const InterpreterOptions& options) {
if (options.enable_regex) {
CEL_RETURN_IF_ERROR(RegisterRegexFunctions(registry));
}
return absl::OkStatus();
}
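// Editorial examples (not part of the original source), taken from the unit
// tests below, of CEL expressions these registrations enable:
//   re.capture('foo', 'fo(o)') -> 'o'
//   re.captureN('testuser@', '(?P<username>.*)@') -> {'username': 'testuser'}
//   re.extract('The user testuser belongs to testdomain',
//              'The (user|domain) (?P<Username>.*) belongs to (?P<Domain>.*)',
//              '\\3 contains \\1 \\2') -> 'testdomain contains user testuser'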
} | #include "extensions/regex_functions.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "google/protobuf/arena.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "eval/public/activation.h"
#include "eval/public/cel_expr_builder_factory.h"
#include "eval/public/cel_options.h"
#include "eval/public/cel_value.h"
#include "eval/public/containers/container_backed_map_impl.h"
#include "eval/public/testing/matchers.h"
#include "internal/testing.h"
#include "parser/parser.h"
namespace cel::extensions {
namespace {
using ::absl_testing::StatusIs;
using ::google::api::expr::runtime::CelValue;
using Builder = ::google::api::expr::runtime::CelExpressionBuilder;
using ::absl_testing::IsOkAndHolds;
using ::google::api::expr::parser::Parse;
using ::google::api::expr::runtime::test::IsCelError;
using ::google::api::expr::runtime::test::IsCelString;
struct TestCase {
const std::string expr_string;
const std::string expected_result;
};
class RegexFunctionsTest : public ::testing::TestWithParam<TestCase> {
public:
RegexFunctionsTest() {
options_.enable_regex = true;
options_.enable_qualified_identifier_rewrites = true;
builder_ = CreateCelExpressionBuilder(options_);
}
absl::StatusOr<CelValue> TestCaptureStringInclusion(
const std::string& expr_string) {
CEL_RETURN_IF_ERROR(
RegisterRegexFunctions(builder_->GetRegistry(), options_));
CEL_ASSIGN_OR_RETURN(auto parsed_expr, Parse(expr_string));
CEL_ASSIGN_OR_RETURN(
auto expr_plan, builder_->CreateExpression(&parsed_expr.expr(),
&parsed_expr.source_info()));
::google::api::expr::runtime::Activation activation;
return expr_plan->Evaluate(activation, &arena_);
}
google::protobuf::Arena arena_;
google::api::expr::runtime::InterpreterOptions options_;
std::unique_ptr<Builder> builder_;
};
TEST_F(RegexFunctionsTest, CaptureStringSuccessWithCombinationOfGroups) {
std::vector<std::pair<CelValue, CelValue>> cel_values;
cel_values.emplace_back(std::make_pair(
CelValue::CreateString(google::protobuf::Arena::Create<std::string>(&arena_, "1")),
CelValue::CreateString(
google::protobuf::Arena::Create<std::string>(&arena_, "user"))));
cel_values.emplace_back(std::make_pair(
CelValue::CreateString(
google::protobuf::Arena::Create<std::string>(&arena_, "Username")),
CelValue::CreateString(
google::protobuf::Arena::Create<std::string>(&arena_, "testuser"))));
cel_values.emplace_back(
std::make_pair(CelValue::CreateString(
google::protobuf::Arena::Create<std::string>(&arena_, "Domain")),
CelValue::CreateString(google::protobuf::Arena::Create<std::string>(
&arena_, "testdomain"))));
auto container_map = google::api::expr::runtime::CreateContainerBackedMap(
absl::MakeSpan(cel_values));
auto* cel_map = container_map->release();
arena_.Own(cel_map);
CelValue expected_result = CelValue::CreateMap(cel_map);
auto status = TestCaptureStringInclusion(
(R"(re.captureN('The user testuser belongs to testdomain',
'The (user|domain) (?P<Username>.*) belongs to (?P<Domain>.*)'))"));
ASSERT_OK(status.status());
EXPECT_EQ(status.value().DebugString(), expected_result.DebugString());
}
TEST_F(RegexFunctionsTest, CaptureStringSuccessWithSingleNamedGroup) {
std::vector<std::pair<CelValue, CelValue>> cel_values;
cel_values.push_back(std::make_pair(
CelValue::CreateString(
google::protobuf::Arena::Create<std::string>(&arena_, "username")),
CelValue::CreateString(
google::protobuf::Arena::Create<std::string>(&arena_, "testuser"))));
auto container_map = google::api::expr::runtime::CreateContainerBackedMap(
absl::MakeSpan(cel_values));
auto cel_map = container_map->release();
arena_.Own(cel_map);
CelValue expected_result = CelValue::CreateMap(cel_map);
auto status = TestCaptureStringInclusion((R"(re.captureN('testuser@',
'(?P<username>.*)@'))"));
ASSERT_OK(status.status());
EXPECT_EQ(status.value().DebugString(), expected_result.DebugString());
}
TEST_F(RegexFunctionsTest, CaptureStringSuccessWithMultipleUnamedGroups) {
std::vector<std::pair<CelValue, CelValue>> cel_values;
cel_values.emplace_back(std::make_pair(
CelValue::CreateString(google::protobuf::Arena::Create<std::string>(&arena_, "1")),
CelValue::CreateString(
google::protobuf::Arena::Create<std::string>(&arena_, "testuser"))));
cel_values.emplace_back(std::make_pair(
CelValue::CreateString(google::protobuf::Arena::Create<std::string>(&arena_, "2")),
CelValue::CreateString(
google::protobuf::Arena::Create<std::string>(&arena_, "testdomain"))));
auto container_map = google::api::expr::runtime::CreateContainerBackedMap(
absl::MakeSpan(cel_values));
auto cel_map = container_map->release();
arena_.Own(cel_map);
CelValue expected_result = CelValue::CreateMap(cel_map);
auto status =
TestCaptureStringInclusion((R"(re.captureN('testuser@testdomain',
'(.*)@([^.]*)'))"));
ASSERT_OK(status.status());
EXPECT_EQ(status.value().DebugString(), expected_result.DebugString());
}
TEST_F(RegexFunctionsTest, ExtractStringWithNamedAndUnnamedGroups) {
auto status = TestCaptureStringInclusion(
(R"(re.extract('The user testuser belongs to testdomain',
'The (user|domain) (?P<Username>.*) belongs to (?P<Domain>.*)',
'\\3 contains \\1 \\2'))"));
ASSERT_TRUE(status.value().IsString());
EXPECT_THAT(status,
IsOkAndHolds(IsCelString("testdomain contains user testuser")));
}
TEST_F(RegexFunctionsTest, ExtractStringWithEmptyStrings) {
std::string expected_result = "";
auto status = TestCaptureStringInclusion((R"(re.extract('', '', ''))"));
ASSERT_TRUE(status.value().IsString());
EXPECT_THAT(status, IsOkAndHolds(IsCelString(expected_result)));
}
TEST_F(RegexFunctionsTest, ExtractStringWithUnnamedGroups) {
auto status = TestCaptureStringInclusion(
(R"(re.extract('[email protected]', '(.*)@([^.]*)', '\\2!\\1'))"));
EXPECT_THAT(status, IsOkAndHolds(IsCelString("google!testuser")));
}
TEST_F(RegexFunctionsTest, ExtractStringWithNoGroups) {
auto status =
TestCaptureStringInclusion((R"(re.extract('foo', '.*', '\'\\0\''))"));
EXPECT_THAT(status, IsOkAndHolds(IsCelString("'foo'")));
}
TEST_F(RegexFunctionsTest, CaptureStringWithUnnamedGroups) {
auto status = TestCaptureStringInclusion((R"(re.capture('foo', 'fo(o)'))"));
EXPECT_THAT(status, IsOkAndHolds(IsCelString("o")));
}
std::vector<TestCase> createParams() {
return {
{
(R"(re.extract('foo', 'f(o+)(s)', '\\1\\2'))"),
"Unable to extract string for the given regex"},
{
(R"(re.extract('foo', 'f(o+)', '\\1\\2'))"),
"Unable to extract string for the given regex"},
{
(R"(re.extract('foo', 'f(o+)(abc', '\\1\\2'))"), "Regex is Invalid"},
{
(R"(re.capture('foo', ''))"),
"Unable to capture groups for the given regex"},
{
(R"(re.capture('foo', '.*'))"),
"Unable to capture groups for the given regex"},
{
(R"(re.capture('', 'bar'))"),
"Unable to capture groups for the given regex"},
{
(R"(re.capture('foo', 'fo(o+)(s)'))"),
"Unable to capture groups for the given regex"},
{
(R"(re.capture('foo', 'fo(o+)(abc'))"), "Regex is Invalid"},
{
(R"(re.captureN('foo', ''))"),
"Capturing groups were not found in the given regex."},
{
(R"(re.captureN('foo', '.*'))"),
"Capturing groups were not found in the given regex."},
{
(R"(re.captureN('', 'bar'))"),
"Capturing groups were not found in the given regex."},
{
(R"(re.captureN('foo', 'fo(o+)(s)'))"),
"Unable to capture groups for the given regex"},
{
(R"(re.captureN('foo', 'fo(o+)(abc'))"), "Regex is Invalid"},
};
}
TEST_P(RegexFunctionsTest, RegexFunctionsTests) {
const TestCase& test_case = GetParam();
ABSL_LOG(INFO) << "Testing Cel Expression: " << test_case.expr_string;
auto status = TestCaptureStringInclusion(test_case.expr_string);
EXPECT_THAT(
status.value(),
IsCelError(StatusIs(absl::StatusCode::kInvalidArgument,
testing::HasSubstr(test_case.expected_result))));
}
INSTANTIATE_TEST_SUITE_P(RegexFunctionsTest, RegexFunctionsTest,
testing::ValuesIn(createParams()));
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/regex_functions.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/regex_functions_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
b576bd0b-f67d-4f76-a31b-799406585c21 | cpp | tensorflow/tensorflow | pjrt_c_api | third_party/xla/xla/pjrt/c/pjrt_c_api.h | third_party/xla/xla/pjrt/c/pjrt_c_api_test.cc | #ifndef XLA_PJRT_C_PJRT_C_API_H_
#define XLA_PJRT_C_PJRT_C_API_H_
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#define PJRT_STRUCT_SIZE(struct_type, last_field) \
offsetof(struct_type, last_field) + sizeof(((struct_type*)0)->last_field)
#define PJRT_DEFINE_STRUCT_TRAITS(sname, last_field) \
typedef struct sname sname; \
enum { sname##_STRUCT_SIZE = PJRT_STRUCT_SIZE(sname, last_field) }
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
PJRT_Extension_Type_Gpu_Custom_Call = 0,
PJRT_Extension_Type_Profiler,
PJRT_Extension_Type_Custom_Partitioner,
PJRT_Extension_Type_Stream,
PJRT_Extension_Type_Layouts,
PJRT_Extension_Type_FFI,
} PJRT_Extension_Type;
typedef struct PJRT_Extension_Base {
size_t struct_size;
PJRT_Extension_Type type;
struct PJRT_Extension_Base* next;
} PJRT_Extension_Base;
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Extension_Base, next);
#define PJRT_API_MAJOR 0
#define PJRT_API_MINOR 55
struct PJRT_Api_Version {
size_t struct_size;
PJRT_Extension_Base* extension_start;
int major_version;
int minor_version;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Api_Version, minor_version);
typedef struct PJRT_Error PJRT_Error;
struct PJRT_Error_Destroy_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Error* error;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Error_Destroy_Args, error);
typedef void PJRT_Error_Destroy(PJRT_Error_Destroy_Args* args);
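// Editorial caller-side sketch (an assumption about typical use, not part of
// the original header): every *_Args struct carries struct_size so caller and
// plugin can agree on which fields are present; the *_STRUCT_SIZE constants
// come from PJRT_DEFINE_STRUCT_TRAITS above.
//   PJRT_Error_Destroy_Args args;
//   args.struct_size = PJRT_Error_Destroy_Args_STRUCT_SIZE;
//   args.extension_start = nullptr;
//   args.error = error;
//   api->PJRT_Error_Destroy(&args);  // `api` is the plugin's PJRT_Api table.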
struct PJRT_Error_Message_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const PJRT_Error* error;
const char* message;
size_t message_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Error_Message_Args, message_size);
typedef void PJRT_Error_Message(PJRT_Error_Message_Args* args);
typedef enum {
PJRT_Error_Code_CANCELLED = 1,
PJRT_Error_Code_UNKNOWN = 2,
PJRT_Error_Code_INVALID_ARGUMENT = 3,
PJRT_Error_Code_DEADLINE_EXCEEDED = 4,
PJRT_Error_Code_NOT_FOUND = 5,
PJRT_Error_Code_ALREADY_EXISTS = 6,
PJRT_Error_Code_PERMISSION_DENIED = 7,
PJRT_Error_Code_RESOURCE_EXHAUSTED = 8,
PJRT_Error_Code_FAILED_PRECONDITION = 9,
PJRT_Error_Code_ABORTED = 10,
PJRT_Error_Code_OUT_OF_RANGE = 11,
PJRT_Error_Code_UNIMPLEMENTED = 12,
PJRT_Error_Code_INTERNAL = 13,
PJRT_Error_Code_UNAVAILABLE = 14,
PJRT_Error_Code_DATA_LOSS = 15,
PJRT_Error_Code_UNAUTHENTICATED = 16
} PJRT_Error_Code;
struct PJRT_Error_GetCode_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const PJRT_Error* error;
PJRT_Error_Code code;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Error_GetCode_Args, code);
typedef PJRT_Error* PJRT_Error_GetCode(PJRT_Error_GetCode_Args* args);
typedef PJRT_Error* (*PJRT_CallbackError)(PJRT_Error_Code code,
const char* message,
size_t message_size);
typedef enum {
PJRT_NamedValue_kString = 0,
PJRT_NamedValue_kInt64,
PJRT_NamedValue_kInt64List,
PJRT_NamedValue_kFloat,
PJRT_NamedValue_kBool,
} PJRT_NamedValue_Type;
struct PJRT_NamedValue {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const char* name;
size_t name_size;
PJRT_NamedValue_Type type;
union {
const char* string_value;
int64_t int64_value;
const int64_t* int64_array_value;
float float_value;
bool bool_value;
};
size_t value_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_NamedValue, value_size);
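// Editorial sketch (assumption, not part of the original header): populating a
// scalar int64 PJRT_NamedValue; the option name here is purely hypothetical.
//   PJRT_NamedValue option;
//   option.struct_size = PJRT_NamedValue_STRUCT_SIZE;
//   option.extension_start = nullptr;
//   option.name = "num_nodes";  // hypothetical option name
//   option.name_size = 9;       // strlen("num_nodes")
//   option.type = PJRT_NamedValue_kInt64;
//   option.int64_value = 1;
//   option.value_size = 1;      // scalar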
struct PJRT_Plugin_Initialize_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Plugin_Initialize_Args, extension_start);
typedef PJRT_Error* PJRT_Plugin_Initialize(PJRT_Plugin_Initialize_Args* args);
struct PJRT_Plugin_Attributes_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const PJRT_NamedValue* attributes;
size_t num_attributes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Plugin_Attributes_Args, attributes);
typedef PJRT_Error* PJRT_Plugin_Attributes(PJRT_Plugin_Attributes_Args* args);
typedef struct PJRT_Event PJRT_Event;
struct PJRT_Event_Destroy_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Event* event;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Event_Destroy_Args, event);
typedef PJRT_Error* PJRT_Event_Destroy(PJRT_Event_Destroy_Args* args);
struct PJRT_Event_IsReady_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Event* event;
bool is_ready;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Event_IsReady_Args, is_ready);
typedef PJRT_Error* PJRT_Event_IsReady(PJRT_Event_IsReady_Args* args);
struct PJRT_Event_Error_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Event* event;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Event_Error_Args, event);
typedef PJRT_Error* PJRT_Event_Error(PJRT_Event_Error_Args* args);
struct PJRT_Event_Await_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Event* event;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Event_Await_Args, event);
typedef PJRT_Error* PJRT_Event_Await(PJRT_Event_Await_Args* args);
typedef void (*PJRT_Event_OnReadyCallback)(PJRT_Error* error, void* user_arg);
struct PJRT_Event_OnReady_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Event* event;
PJRT_Event_OnReadyCallback callback;
void* user_arg;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Event_OnReady_Args, user_arg);
typedef PJRT_Error* PJRT_Event_OnReady(PJRT_Event_OnReady_Args* args);
typedef struct PJRT_Client PJRT_Client;
typedef struct PJRT_Device PJRT_Device;
typedef struct PJRT_Memory PJRT_Memory;
typedef struct PJRT_DeviceDescription PJRT_DeviceDescription;
typedef struct PJRT_TopologyDescription PJRT_TopologyDescription;
typedef struct PJRT_Executable PJRT_Executable;
typedef struct PJRT_LoadedExecutable PJRT_LoadedExecutable;
typedef struct PJRT_Buffer PJRT_Buffer;
typedef void (*PJRT_KeyValueGetCallback_ValueDeleter)(char* value);
struct PJRT_KeyValueGetCallback_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const char* key;
size_t key_size;
int timeout_in_ms;
PJRT_CallbackError* callback_error;
void* user_arg;
char* value;
size_t value_size;
PJRT_KeyValueGetCallback_ValueDeleter value_deleter_callback;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_KeyValueGetCallback_Args,
value_deleter_callback);
typedef PJRT_Error* (*PJRT_KeyValueGetCallback)(
PJRT_KeyValueGetCallback_Args* args);
struct PJRT_KeyValuePutCallback_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const char* key;
size_t key_size;
const char* value;
size_t value_size;
PJRT_CallbackError* callback_error;
void* user_arg;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_KeyValuePutCallback_Args, user_arg);
typedef PJRT_Error* (*PJRT_KeyValuePutCallback)(
PJRT_KeyValuePutCallback_Args* args);
struct PJRT_Client_Create_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const PJRT_NamedValue* create_options;
size_t num_options;
PJRT_KeyValueGetCallback kv_get_callback;
void* kv_get_user_arg;
PJRT_KeyValuePutCallback kv_put_callback;
void* kv_put_user_arg;
PJRT_Client* client;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_Create_Args, client);
typedef PJRT_Error* PJRT_Client_Create(PJRT_Client_Create_Args* args);
struct PJRT_Client_Destroy_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_Destroy_Args, client);
typedef PJRT_Error* PJRT_Client_Destroy(PJRT_Client_Destroy_Args* args);
struct PJRT_Client_PlatformName_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
const char* platform_name;
size_t platform_name_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_PlatformName_Args, platform_name_size);
typedef PJRT_Error* PJRT_Client_PlatformName(
PJRT_Client_PlatformName_Args* args);
struct PJRT_Client_ProcessIndex_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
int process_index;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_ProcessIndex_Args, process_index);
typedef PJRT_Error* PJRT_Client_ProcessIndex(
PJRT_Client_ProcessIndex_Args* args);
struct PJRT_Client_PlatformVersion_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
const char* platform_version;
size_t platform_version_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_PlatformVersion_Args,
platform_version_size);
typedef PJRT_Error* PJRT_Client_PlatformVersion(
PJRT_Client_PlatformVersion_Args* args);
struct PJRT_Client_TopologyDescription_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
PJRT_TopologyDescription* topology;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_TopologyDescription_Args, topology);
typedef PJRT_Error* PJRT_Client_TopologyDescription(
PJRT_Client_TopologyDescription_Args* args);
struct PJRT_Client_Devices_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
PJRT_Device* const* devices;
size_t num_devices;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_Devices_Args, num_devices);
typedef PJRT_Error* PJRT_Client_Devices(PJRT_Client_Devices_Args* args);
struct PJRT_Client_AddressableDevices_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
PJRT_Device* const* addressable_devices;
size_t num_addressable_devices;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_AddressableDevices_Args,
num_addressable_devices);
typedef PJRT_Error* PJRT_Client_AddressableDevices(
PJRT_Client_AddressableDevices_Args* args);
struct PJRT_Client_LookupDevice_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
int id;
PJRT_Device* device;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_LookupDevice_Args, device);
typedef PJRT_Error* PJRT_Client_LookupDevice(
PJRT_Client_LookupDevice_Args* args);
struct PJRT_Client_LookupAddressableDevice_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
int local_hardware_id;
PJRT_Device* addressable_device;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_LookupAddressableDevice_Args,
addressable_device);
typedef PJRT_Error* PJRT_Client_LookupAddressableDevice(
PJRT_Client_LookupAddressableDevice_Args* args);
struct PJRT_Client_AddressableMemories_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
PJRT_Memory* const* addressable_memories;
size_t num_addressable_memories;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_AddressableMemories_Args,
num_addressable_memories);
typedef PJRT_Error* PJRT_Client_AddressableMemories(
PJRT_Client_AddressableMemories_Args* args);
struct PJRT_Program {
size_t struct_size;
PJRT_Extension_Base* extension_start;
char* code;
size_t code_size;
const char* format;
size_t format_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Program, format_size);
struct PJRT_Client_Compile_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
const PJRT_Program* program;
const char* compile_options;
size_t compile_options_size;
PJRT_LoadedExecutable* executable;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_Compile_Args, executable);
typedef PJRT_Error* PJRT_Client_Compile(PJRT_Client_Compile_Args* args);
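// Editorial caller-side sketch (assumption, not part of the original header):
// compiling an MLIR module. The "mlir" format string and the use of serialized
// compile options bytes are assumptions about the conventional inputs to this
// entry point.
//   PJRT_Program program;
//   program.struct_size = PJRT_Program_STRUCT_SIZE;
//   program.extension_start = nullptr;
//   program.code = const_cast<char*>(mlir_text.data());
//   program.code_size = mlir_text.size();
//   program.format = "mlir";
//   program.format_size = 4;
//   PJRT_Client_Compile_Args compile_args;
//   compile_args.struct_size = PJRT_Client_Compile_Args_STRUCT_SIZE;
//   compile_args.extension_start = nullptr;
//   compile_args.client = client;
//   compile_args.program = &program;
//   compile_args.compile_options = options_bytes.data();
//   compile_args.compile_options_size = options_bytes.size();
//   PJRT_Error* error = api->PJRT_Client_Compile(&compile_args);
//   // On success, compile_args.executable holds the new loaded executable.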
struct PJRT_Client_DefaultDeviceAssignment_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
int num_replicas;
int num_partitions;
size_t default_assignment_size;
int* default_assignment;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_DefaultDeviceAssignment_Args,
default_assignment);
typedef PJRT_Error* PJRT_Client_DefaultDeviceAssignment(
PJRT_Client_DefaultDeviceAssignment_Args* args);
typedef enum {
PJRT_Buffer_Type_INVALID,
PJRT_Buffer_Type_PRED,
PJRT_Buffer_Type_S8,
PJRT_Buffer_Type_S16,
PJRT_Buffer_Type_S32,
PJRT_Buffer_Type_S64,
PJRT_Buffer_Type_U8,
PJRT_Buffer_Type_U16,
PJRT_Buffer_Type_U32,
PJRT_Buffer_Type_U64,
PJRT_Buffer_Type_F16,
PJRT_Buffer_Type_F32,
PJRT_Buffer_Type_F64,
PJRT_Buffer_Type_BF16,
PJRT_Buffer_Type_C64,
PJRT_Buffer_Type_C128,
PJRT_Buffer_Type_F8E5M2,
PJRT_Buffer_Type_F8E4M3FN,
PJRT_Buffer_Type_F8E4M3B11FNUZ,
PJRT_Buffer_Type_F8E5M2FNUZ,
PJRT_Buffer_Type_F8E4M3FNUZ,
PJRT_Buffer_Type_S4,
PJRT_Buffer_Type_U4,
PJRT_Buffer_Type_TOKEN,
PJRT_Buffer_Type_S2,
PJRT_Buffer_Type_U2,
PJRT_Buffer_Type_F8E4M3,
PJRT_Buffer_Type_F8E3M4,
} PJRT_Buffer_Type;
typedef enum {
PJRT_HostBufferSemantics_kImmutableOnlyDuringCall,
PJRT_HostBufferSemantics_kImmutableUntilTransferCompletes,
PJRT_HostBufferSemantics_kImmutableZeroCopy,
PJRT_HostBufferSemantics_kMutableZeroCopy,
} PJRT_HostBufferSemantics;
typedef enum {
PJRT_Buffer_MemoryLayout_Type_Tiled = 0,
PJRT_Buffer_MemoryLayout_Type_Strides,
} PJRT_Buffer_MemoryLayout_Type;
struct PJRT_Buffer_MemoryLayout_Tiled {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const int64_t* minor_to_major;
size_t minor_to_major_size;
const int64_t* tile_dims;
const size_t* tile_dim_sizes;
size_t num_tiles;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_MemoryLayout_Tiled, num_tiles);
struct PJRT_Buffer_MemoryLayout_Strides {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const int64_t* byte_strides;
size_t num_byte_strides;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_MemoryLayout_Strides, num_byte_strides);
struct PJRT_Buffer_MemoryLayout {
size_t struct_size;
PJRT_Extension_Base* extension_start;
union {
PJRT_Buffer_MemoryLayout_Tiled tiled;
PJRT_Buffer_MemoryLayout_Strides strides;
};
PJRT_Buffer_MemoryLayout_Type type;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_MemoryLayout, type);
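// Editorial sketch (assumption, not part of the original header): describing a
// dense row-major {2, 3} f32 buffer with explicit byte strides.
//   PJRT_Buffer_MemoryLayout layout;
//   layout.struct_size = PJRT_Buffer_MemoryLayout_STRUCT_SIZE;
//   layout.extension_start = nullptr;
//   layout.type = PJRT_Buffer_MemoryLayout_Type_Strides;
//   static const int64_t kByteStrides[] = {12, 4};  // 3 * 4 bytes, 4 bytes
//   layout.strides.struct_size = PJRT_Buffer_MemoryLayout_Strides_STRUCT_SIZE;
//   layout.strides.extension_start = nullptr;
//   layout.strides.byte_strides = kByteStrides;
//   layout.strides.num_byte_strides = 2;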
struct PJRT_Client_BufferFromHostBuffer_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
const void* data;
PJRT_Buffer_Type type;
const int64_t* dims;
size_t num_dims;
const int64_t* byte_strides;
size_t num_byte_strides;
PJRT_HostBufferSemantics host_buffer_semantics;
PJRT_Device* device;
PJRT_Memory* memory;
PJRT_Buffer_MemoryLayout* device_layout;
PJRT_Event* done_with_host_buffer;
PJRT_Buffer* buffer;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_BufferFromHostBuffer_Args, buffer);
typedef PJRT_Error* PJRT_Client_BufferFromHostBuffer(
PJRT_Client_BufferFromHostBuffer_Args* args);
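// Editorial caller-side sketch (assumption, not part of the original header):
// staging a host float array of shape {2, 3} onto a device. Passing
// byte_strides == nullptr is taken here to mean a dense, major-to-minor
// layout; buffer and done_with_host_buffer are outputs.
//   PJRT_Client_BufferFromHostBuffer_Args args;
//   args.struct_size = PJRT_Client_BufferFromHostBuffer_Args_STRUCT_SIZE;
//   args.extension_start = nullptr;
//   args.client = client;
//   args.data = host_floats;
//   args.type = PJRT_Buffer_Type_F32;
//   static const int64_t kDims[] = {2, 3};
//   args.dims = kDims;
//   args.num_dims = 2;
//   args.byte_strides = nullptr;
//   args.num_byte_strides = 0;
//   args.host_buffer_semantics =
//       PJRT_HostBufferSemantics_kImmutableUntilTransferCompletes;
//   args.device = device;
//   args.memory = nullptr;
//   args.device_layout = nullptr;
//   PJRT_Error* error = api->PJRT_Client_BufferFromHostBuffer(&args);
//   // On success, wait on args.done_with_host_buffer before freeing host_floats.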
struct PJRT_Client_CreateViewOfDeviceBuffer_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
void* device_buffer_ptr;
const int64_t* dims;
size_t num_dims;
PJRT_Buffer_Type element_type;
PJRT_Buffer_MemoryLayout* layout;
PJRT_Device* device;
void (*on_delete_callback)(void* device_buffer_ptr, void* user_arg);
void* on_delete_callback_arg;
intptr_t stream;
PJRT_Buffer* buffer;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Client_CreateViewOfDeviceBuffer_Args, buffer);
typedef PJRT_Error* PJRT_Client_CreateViewOfDeviceBuffer(
PJRT_Client_CreateViewOfDeviceBuffer_Args* args);
struct PJRT_DeviceDescription_Id_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_DeviceDescription* device_description;
int id;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_DeviceDescription_Id_Args, id);
typedef PJRT_Error* PJRT_DeviceDescription_Id(
PJRT_DeviceDescription_Id_Args* args);
struct PJRT_DeviceDescription_ProcessIndex_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_DeviceDescription* device_description;
int process_index;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_DeviceDescription_ProcessIndex_Args,
process_index);
typedef PJRT_Error* PJRT_DeviceDescription_ProcessIndex(
PJRT_DeviceDescription_ProcessIndex_Args* args);
struct PJRT_DeviceDescription_Attributes_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_DeviceDescription* device_description;
size_t num_attributes;
const PJRT_NamedValue* attributes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_DeviceDescription_Attributes_Args, attributes);
typedef PJRT_Error* PJRT_DeviceDescription_Attributes(
PJRT_DeviceDescription_Attributes_Args* args);
struct PJRT_DeviceDescription_Kind_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_DeviceDescription* device_description;
const char* device_kind;
size_t device_kind_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_DeviceDescription_Kind_Args, device_kind_size);
typedef PJRT_Error* PJRT_DeviceDescription_Kind(
PJRT_DeviceDescription_Kind_Args* args);
struct PJRT_DeviceDescription_DebugString_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_DeviceDescription* device_description;
const char* debug_string;
size_t debug_string_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_DeviceDescription_DebugString_Args,
debug_string_size);
typedef PJRT_Error* PJRT_DeviceDescription_DebugString(
PJRT_DeviceDescription_DebugString_Args* args);
struct PJRT_DeviceDescription_ToString_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_DeviceDescription* device_description;
const char* to_string;
size_t to_string_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_DeviceDescription_ToString_Args, to_string_size);
typedef PJRT_Error* PJRT_DeviceDescription_ToString(
PJRT_DeviceDescription_ToString_Args* args);
struct PJRT_Device_GetDescription_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Device* device;
PJRT_DeviceDescription* device_description;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Device_GetDescription_Args, device_description);
typedef PJRT_Error* PJRT_Device_GetDescription(
PJRT_Device_GetDescription_Args* args);
struct PJRT_Device_IsAddressable_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Device* device;
bool is_addressable;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Device_IsAddressable_Args, is_addressable);
typedef PJRT_Error* PJRT_Device_IsAddressable(
PJRT_Device_IsAddressable_Args* args);
struct PJRT_Device_LocalHardwareId_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Device* device;
int local_hardware_id;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Device_LocalHardwareId_Args, local_hardware_id);
typedef PJRT_Error* PJRT_Device_LocalHardwareId(
PJRT_Device_LocalHardwareId_Args* args);
struct PJRT_Device_AddressableMemories_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Device* device;
PJRT_Memory* const* memories;
size_t num_memories;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Device_AddressableMemories_Args, num_memories);
typedef PJRT_Error* PJRT_Device_AddressableMemories(
PJRT_Device_AddressableMemories_Args* args);
struct PJRT_Device_DefaultMemory_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Device* device;
PJRT_Memory* memory;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Device_DefaultMemory_Args, memory);
typedef PJRT_Error* PJRT_Device_DefaultMemory(
PJRT_Device_DefaultMemory_Args* args);
struct PJRT_Device_MemoryStats_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Device* device;
int64_t bytes_in_use;
int64_t peak_bytes_in_use;
bool peak_bytes_in_use_is_set;
int64_t num_allocs;
bool num_allocs_is_set;
int64_t largest_alloc_size;
bool largest_alloc_size_is_set;
int64_t bytes_limit;
bool bytes_limit_is_set;
int64_t bytes_reserved;
bool bytes_reserved_is_set;
int64_t peak_bytes_reserved;
bool peak_bytes_reserved_is_set;
int64_t bytes_reservable_limit;
bool bytes_reservable_limit_is_set;
int64_t largest_free_block_bytes;
bool largest_free_block_bytes_is_set;
int64_t pool_bytes;
bool pool_bytes_is_set;
int64_t peak_pool_bytes;
bool peak_pool_bytes_is_set;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Device_MemoryStats_Args, peak_pool_bytes_is_set);
typedef PJRT_Error* PJRT_Device_MemoryStats(PJRT_Device_MemoryStats_Args* args);
struct PJRT_Memory_Id_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Memory* memory;
int id;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Memory_Id_Args, id);
typedef PJRT_Error* PJRT_Memory_Id(PJRT_Memory_Id_Args* args);
struct PJRT_Memory_Kind_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Memory* memory;
const char* kind;
size_t kind_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Memory_Kind_Args, kind_size);
typedef PJRT_Error* PJRT_Memory_Kind(PJRT_Memory_Kind_Args* args);
struct PJRT_Memory_Kind_Id_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Memory* memory;
int kind_id;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Memory_Kind_Id_Args, kind_id);
typedef PJRT_Error* PJRT_Memory_Kind_Id(PJRT_Memory_Kind_Id_Args* args);
struct PJRT_Memory_DebugString_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Memory* memory;
const char* debug_string;
size_t debug_string_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Memory_DebugString_Args, debug_string_size);
typedef PJRT_Error* PJRT_Memory_DebugString(PJRT_Memory_DebugString_Args* args);
struct PJRT_Memory_ToString_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Memory* memory;
const char* to_string;
size_t to_string_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Memory_ToString_Args, to_string_size);
typedef PJRT_Error* PJRT_Memory_ToString(PJRT_Memory_ToString_Args* args);
struct PJRT_Memory_AddressableByDevices_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Memory* memory;
PJRT_Device* const* devices;
size_t num_devices;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Memory_AddressableByDevices_Args, num_devices);
typedef PJRT_Error* PJRT_Memory_AddressableByDevices(
PJRT_Memory_AddressableByDevices_Args* args);
typedef struct PJRT_ExecuteContext PJRT_ExecuteContext;
struct PJRT_ExecuteContext_Create_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_ExecuteContext* context;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_ExecuteContext_Create_Args, context);
typedef PJRT_Error* PJRT_ExecuteContext_Create(
PJRT_ExecuteContext_Create_Args* args);
struct PJRT_ExecuteContext_Destroy_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_ExecuteContext* context;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_ExecuteContext_Destroy_Args, context);
typedef PJRT_Error* PJRT_ExecuteContext_Destroy(
PJRT_ExecuteContext_Destroy_Args* args);
struct PJRT_Executable_Destroy_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_Destroy_Args, executable);
typedef PJRT_Error* PJRT_Executable_Destroy(PJRT_Executable_Destroy_Args* args);
struct PJRT_LoadedExecutable_Destroy_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_LoadedExecutable* executable;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_LoadedExecutable_Destroy_Args, executable);
typedef PJRT_Error* PJRT_LoadedExecutable_Destroy(
PJRT_LoadedExecutable_Destroy_Args* args);
struct PJRT_LoadedExecutable_GetExecutable_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_LoadedExecutable* loaded_executable;
PJRT_Executable* executable;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_LoadedExecutable_GetExecutable_Args, executable);
typedef PJRT_Error* PJRT_LoadedExecutable_GetExecutable(
PJRT_LoadedExecutable_GetExecutable_Args* args);
struct PJRT_Executable_Name_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
const char* executable_name;
size_t executable_name_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_Name_Args, executable_name_size);
typedef PJRT_Error* PJRT_Executable_Name(PJRT_Executable_Name_Args* args);
struct PJRT_Executable_NumReplicas_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
size_t num_replicas;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_NumReplicas_Args, num_replicas);
typedef PJRT_Error* PJRT_Executable_NumReplicas(
PJRT_Executable_NumReplicas_Args* args);
struct PJRT_Executable_NumPartitions_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
size_t num_partitions;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_NumPartitions_Args, num_partitions);
typedef PJRT_Error* PJRT_Executable_NumPartitions(
PJRT_Executable_NumPartitions_Args* args);
struct PJRT_LoadedExecutable_AddressableDevices_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_LoadedExecutable* executable;
PJRT_Device* const* addressable_devices;
size_t num_addressable_devices;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_LoadedExecutable_AddressableDevices_Args,
num_addressable_devices);
typedef PJRT_Error* PJRT_LoadedExecutable_AddressableDevices(
PJRT_LoadedExecutable_AddressableDevices_Args* args);
struct PJRT_Executable_OptimizedProgram_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
PJRT_Program* program;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_OptimizedProgram_Args, program);
typedef PJRT_Error* PJRT_Executable_OptimizedProgram(
PJRT_Executable_OptimizedProgram_Args* args);
struct PJRT_LoadedExecutable_Delete_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_LoadedExecutable* executable;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_LoadedExecutable_Delete_Args, executable);
typedef PJRT_Error* PJRT_LoadedExecutable_Delete(
PJRT_LoadedExecutable_Delete_Args* args);
struct PJRT_LoadedExecutable_IsDeleted_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_LoadedExecutable* executable;
bool is_deleted;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_LoadedExecutable_IsDeleted_Args, is_deleted);
typedef PJRT_Error* PJRT_LoadedExecutable_IsDeleted(
PJRT_LoadedExecutable_IsDeleted_Args* args);
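// A chunk of host memory plus the deleter that releases it; chunks are handed
// to send callbacks and added to copy-to-device streams.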
typedef struct PJRT_Chunk {
void* data;
size_t size;
void (*deleter)(void* data, void* deleter_arg);
void* deleter_arg;
} PJRT_Chunk;
typedef struct PJRT_CopyToDeviceStream PJRT_CopyToDeviceStream;
struct PJRT_TransferMetadata;
typedef PJRT_Error* (*PJRT_SendCallback)(PJRT_Chunk* chunk,
PJRT_CallbackError* callback_error,
size_t total_size_in_bytes, bool done,
void* user_arg);
typedef void (*PJRT_RecvCallback)(PJRT_CopyToDeviceStream* stream,
void* user_arg);
struct PJRT_SendCallbackInfo {
int64_t channel_id;
void* user_arg;
PJRT_SendCallback send_callback;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_SendCallbackInfo, send_callback);
struct PJRT_RecvCallbackInfo {
int64_t channel_id;
void* user_arg;
PJRT_RecvCallback recv_callback;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_RecvCallbackInfo, recv_callback);
struct PJRT_ExecuteOptions {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_SendCallbackInfo** send_callbacks;
PJRT_RecvCallbackInfo** recv_callbacks;
size_t num_send_ops;
size_t num_recv_ops;
int launch_id;
const int64_t* non_donatable_input_indices;
size_t num_non_donatable_input_indices;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_ExecuteOptions, num_non_donatable_input_indices);
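// argument_lists is indexed as [num_devices][num_args]; output_lists receives
// the result buffers indexed as [num_devices][num_outputs].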
struct PJRT_LoadedExecutable_Execute_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_LoadedExecutable* executable;
PJRT_ExecuteOptions* options;
PJRT_Buffer* const* const* argument_lists;
size_t num_devices;
size_t num_args;
PJRT_Buffer** const* output_lists;
PJRT_Event** device_complete_events;
PJRT_Device* execute_device;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_LoadedExecutable_Execute_Args, execute_device);
typedef PJRT_Error* PJRT_LoadedExecutable_Execute(
PJRT_LoadedExecutable_Execute_Args* args);
struct PJRT_Executable_NumOutputs_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
size_t num_outputs;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_NumOutputs_Args, num_outputs);
typedef PJRT_Error* PJRT_Executable_NumOutputs(
PJRT_Executable_NumOutputs_Args* args);
struct PJRT_Executable_SizeOfGeneratedCodeInBytes_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
int64_t size_in_bytes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_SizeOfGeneratedCodeInBytes_Args,
size_in_bytes);
typedef PJRT_Error* PJRT_Executable_SizeOfGeneratedCodeInBytes(
PJRT_Executable_SizeOfGeneratedCodeInBytes_Args* args);
struct PJRT_Executable_Fingerprint_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
const char* executable_fingerprint;
size_t executable_fingerprint_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_Fingerprint_Args,
executable_fingerprint_size);
typedef PJRT_Error* PJRT_Executable_Fingerprint(
PJRT_Executable_Fingerprint_Args* args);
struct PJRT_Executable_GetCostAnalysis_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
size_t num_properties;
const PJRT_NamedValue* properties;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_GetCostAnalysis_Args, properties);
typedef PJRT_Error* PJRT_Executable_GetCostAnalysis(
PJRT_Executable_GetCostAnalysis_Args* args);
struct PJRT_Executable_GetCompiledMemoryStats_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
int64_t generated_code_size_in_bytes;
int64_t argument_size_in_bytes;
int64_t output_size_in_bytes;
int64_t alias_size_in_bytes;
int64_t temp_size_in_bytes;
int64_t host_generated_code_size_in_bytes;
int64_t host_argument_size_in_bytes;
int64_t host_output_size_in_bytes;
int64_t host_alias_size_in_bytes;
int64_t host_temp_size_in_bytes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_GetCompiledMemoryStats_Args,
host_temp_size_in_bytes);
typedef PJRT_Error* PJRT_Executable_GetCompiledMemoryStats(
PJRT_Executable_GetCompiledMemoryStats_Args* args);
struct PJRT_Executable_OutputElementTypes_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
PJRT_Buffer_Type* output_types;
size_t num_output_types;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_OutputElementTypes_Args,
num_output_types);
typedef PJRT_Error* PJRT_Executable_OutputElementTypes(
PJRT_Executable_OutputElementTypes_Args* args);
struct PJRT_Executable_OutputDimensions_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
size_t num_outputs;
const int64_t* dims;
const size_t* dim_sizes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_OutputDimensions_Args, dim_sizes);
typedef PJRT_Error* PJRT_Executable_OutputDimensions(
PJRT_Executable_OutputDimensions_Args* args);
struct PJRT_Executable_OutputMemoryKinds_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Executable* executable;
size_t num_outputs;
const char* const* memory_kinds;
const size_t* memory_kind_sizes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_OutputMemoryKinds_Args,
memory_kind_sizes);
typedef PJRT_Error* PJRT_Executable_OutputMemoryKinds(
PJRT_Executable_OutputMemoryKinds_Args* args);
typedef struct PJRT_SerializedExecutable PJRT_SerializedExecutable;
struct PJRT_Executable_Serialize_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const PJRT_Executable* executable;
const char* serialized_bytes;
size_t serialized_bytes_size;
PJRT_SerializedExecutable* serialized_executable;
void (*serialized_executable_deleter)(
PJRT_SerializedExecutable* exec);
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_Serialize_Args,
serialized_executable_deleter);
typedef PJRT_Error* PJRT_Executable_Serialize(
PJRT_Executable_Serialize_Args* args);
struct PJRT_Executable_DeserializeAndLoad_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Client* client;
const char* serialized_executable;
size_t serialized_executable_size;
PJRT_LoadedExecutable* loaded_executable;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Executable_DeserializeAndLoad_Args,
loaded_executable);
typedef PJRT_Error* PJRT_Executable_DeserializeAndLoad(
PJRT_Executable_DeserializeAndLoad_Args* args);
struct PJRT_LoadedExecutable_Fingerprint_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_LoadedExecutable* executable;
const char* executable_fingerprint;
size_t executable_fingerprint_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_LoadedExecutable_Fingerprint_Args,
executable_fingerprint_size);
typedef PJRT_Error* PJRT_LoadedExecutable_Fingerprint(
PJRT_LoadedExecutable_Fingerprint_Args* args);
struct PJRT_Buffer_Destroy_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_Destroy_Args, buffer);
typedef PJRT_Error* PJRT_Buffer_Destroy(PJRT_Buffer_Destroy_Args* args);
struct PJRT_Buffer_ElementType_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
PJRT_Buffer_Type type;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_ElementType_Args, type);
typedef PJRT_Error* PJRT_Buffer_ElementType(PJRT_Buffer_ElementType_Args* args);
struct PJRT_Buffer_Dimensions_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
const int64_t* dims;
size_t num_dims;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_Dimensions_Args, num_dims);
typedef PJRT_Error* PJRT_Buffer_Dimensions(PJRT_Buffer_Dimensions_Args* args);
struct PJRT_Buffer_UnpaddedDimensions_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
const int64_t* unpadded_dims;
size_t num_dims;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_UnpaddedDimensions_Args, num_dims);
typedef PJRT_Error* PJRT_Buffer_UnpaddedDimensions(
PJRT_Buffer_UnpaddedDimensions_Args* args);
struct PJRT_Buffer_DynamicDimensionIndices_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
const size_t* dynamic_dim_indices;
size_t num_dynamic_dims;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_DynamicDimensionIndices_Args,
num_dynamic_dims);
typedef PJRT_Error* PJRT_Buffer_DynamicDimensionIndices(
PJRT_Buffer_DynamicDimensionIndices_Args* args);
struct PJRT_Buffer_GetMemoryLayout_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
PJRT_Buffer_MemoryLayout layout;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_GetMemoryLayout_Args, layout);
typedef PJRT_Error* PJRT_Buffer_GetMemoryLayout(
PJRT_Buffer_GetMemoryLayout_Args* args);
struct PJRT_Buffer_ToHostBuffer_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* src;
PJRT_Buffer_MemoryLayout* host_layout;
void* dst;
size_t dst_size;
PJRT_Event* event;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_ToHostBuffer_Args, event);
typedef PJRT_Error* PJRT_Buffer_ToHostBuffer(
PJRT_Buffer_ToHostBuffer_Args* args);
struct PJRT_Buffer_OnDeviceSizeInBytes_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
size_t on_device_size_in_bytes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_OnDeviceSizeInBytes_Args,
on_device_size_in_bytes);
typedef PJRT_Error* PJRT_Buffer_OnDeviceSizeInBytes(
PJRT_Buffer_OnDeviceSizeInBytes_Args* args);
struct PJRT_Buffer_Delete_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_Delete_Args, buffer);
typedef PJRT_Error* PJRT_Buffer_Delete(PJRT_Buffer_Delete_Args* args);
struct PJRT_Buffer_IsDeleted_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
bool is_deleted;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_IsDeleted_Args, is_deleted);
typedef PJRT_Error* PJRT_Buffer_IsDeleted(PJRT_Buffer_IsDeleted_Args* args);
struct PJRT_Buffer_CopyToDevice_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
PJRT_Device* dst_device;
PJRT_Buffer* dst_buffer;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_CopyToDevice_Args, dst_buffer);
typedef PJRT_Error* PJRT_Buffer_CopyToDevice(
PJRT_Buffer_CopyToDevice_Args* args);
struct PJRT_Buffer_CopyToMemory_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
PJRT_Memory* dst_memory;
PJRT_Buffer* dst_buffer;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_CopyToMemory_Args, dst_buffer);
typedef PJRT_Error* PJRT_Buffer_CopyToMemory(
PJRT_Buffer_CopyToMemory_Args* args);
struct PJRT_Buffer_IsOnCpu_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
bool is_on_cpu;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_IsOnCpu_Args, is_on_cpu);
typedef PJRT_Error* PJRT_Buffer_IsOnCpu(PJRT_Buffer_IsOnCpu_Args* args);
struct PJRT_Buffer_Device_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
PJRT_Device* device;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_Device_Args, device);
typedef PJRT_Error* PJRT_Buffer_Device(PJRT_Buffer_Device_Args* args);
struct PJRT_Buffer_Memory_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
PJRT_Memory* memory;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_Memory_Args, memory);
typedef PJRT_Error* PJRT_Buffer_Memory(PJRT_Buffer_Memory_Args* args);
struct PJRT_Buffer_ReadyEvent_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
PJRT_Event* event;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_ReadyEvent_Args, event);
typedef PJRT_Error* PJRT_Buffer_ReadyEvent(PJRT_Buffer_ReadyEvent_Args* args);
struct PJRT_Buffer_UnsafePointer_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
uintptr_t buffer_pointer;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_UnsafePointer_Args, buffer_pointer);
typedef PJRT_Error* PJRT_Buffer_UnsafePointer(
PJRT_Buffer_UnsafePointer_Args* args);
struct PJRT_Buffer_IncreaseExternalReferenceCount_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_IncreaseExternalReferenceCount_Args,
buffer);
typedef PJRT_Error* PJRT_Buffer_IncreaseExternalReferenceCount(
PJRT_Buffer_IncreaseExternalReferenceCount_Args* args);
struct PJRT_Buffer_DecreaseExternalReferenceCount_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_DecreaseExternalReferenceCount_Args,
buffer);
typedef PJRT_Error* PJRT_Buffer_DecreaseExternalReferenceCount(
PJRT_Buffer_DecreaseExternalReferenceCount_Args* args);
struct PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Buffer* buffer;
void* device_memory_ptr;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args,
device_memory_ptr);
typedef PJRT_Error* PJRT_Buffer_OpaqueDeviceMemoryDataPointer(
PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args* args);
struct PJRT_CopyToDeviceStream_Destroy_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_CopyToDeviceStream* stream;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_CopyToDeviceStream_Destroy_Args, stream);
typedef PJRT_Error* PJRT_CopyToDeviceStream_Destroy(
PJRT_CopyToDeviceStream_Destroy_Args* args);
struct PJRT_CopyToDeviceStream_AddChunk_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_CopyToDeviceStream* stream;
PJRT_Chunk* chunk;
PJRT_Event* transfer_complete;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_CopyToDeviceStream_AddChunk_Args,
transfer_complete);
typedef PJRT_Error* PJRT_CopyToDeviceStream_AddChunk(
PJRT_CopyToDeviceStream_AddChunk_Args* args);
struct PJRT_CopyToDeviceStream_TotalBytes_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_CopyToDeviceStream* stream;
int64_t total_bytes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_CopyToDeviceStream_TotalBytes_Args, total_bytes);
typedef PJRT_Error* PJRT_CopyToDeviceStream_TotalBytes(
PJRT_CopyToDeviceStream_TotalBytes_Args* args);
struct PJRT_CopyToDeviceStream_GranuleSize_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_CopyToDeviceStream* stream;
int64_t granule_size_in_bytes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_CopyToDeviceStream_GranuleSize_Args,
granule_size_in_bytes);
typedef PJRT_Error* PJRT_CopyToDeviceStream_GranuleSize(
PJRT_CopyToDeviceStream_GranuleSize_Args* args);
struct PJRT_CopyToDeviceStream_CurrentBytes_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_CopyToDeviceStream* stream;
int64_t current_bytes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_CopyToDeviceStream_CurrentBytes_Args,
current_bytes);
typedef PJRT_Error* PJRT_CopyToDeviceStream_CurrentBytes(
PJRT_CopyToDeviceStream_CurrentBytes_Args* args);
struct PJRT_TopologyDescription_Create_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const char* topology_name;
size_t topology_name_size;
const PJRT_NamedValue* create_options;
size_t num_options;
PJRT_TopologyDescription* topology;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_TopologyDescription_Create_Args, topology);
typedef PJRT_Error* PJRT_TopologyDescription_Create(
PJRT_TopologyDescription_Create_Args* args);
struct PJRT_TopologyDescription_Destroy_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_TopologyDescription* topology;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_TopologyDescription_Destroy_Args, topology);
typedef PJRT_Error* PJRT_TopologyDescription_Destroy(
PJRT_TopologyDescription_Destroy_Args* args);
struct PJRT_TopologyDescription_PlatformVersion_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_TopologyDescription* topology;
const char* platform_version;
size_t platform_version_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_TopologyDescription_PlatformVersion_Args,
platform_version_size);
typedef PJRT_Error* PJRT_TopologyDescription_PlatformVersion(
PJRT_TopologyDescription_PlatformVersion_Args* args);
struct PJRT_TopologyDescription_PlatformName_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_TopologyDescription* topology;
const char* platform_name;
size_t platform_name_size;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_TopologyDescription_PlatformName_Args,
platform_name_size);
typedef PJRT_Error* PJRT_TopologyDescription_PlatformName(
PJRT_TopologyDescription_PlatformName_Args* args);
struct PJRT_TopologyDescription_GetDeviceDescriptions_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_TopologyDescription* topology;
PJRT_DeviceDescription* const* descriptions;
size_t num_descriptions;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_TopologyDescription_GetDeviceDescriptions_Args,
num_descriptions);
typedef PJRT_Error* PJRT_TopologyDescription_GetDeviceDescriptions(
PJRT_TopologyDescription_GetDeviceDescriptions_Args* args);
typedef struct PJRT_SerializedTopology PJRT_SerializedTopology;
struct PJRT_TopologyDescription_Serialize_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_TopologyDescription* topology;
const char* serialized_bytes;
size_t serialized_bytes_size;
PJRT_SerializedTopology* serialized_topology;
void (*serialized_topology_deleter)(
PJRT_SerializedTopology* serialized_topology);
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_TopologyDescription_Serialize_Args,
serialized_topology_deleter);
typedef PJRT_Error* PJRT_TopologyDescription_Serialize(
PJRT_TopologyDescription_Serialize_Args* args);
struct PJRT_TopologyDescription_Attributes_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_TopologyDescription* topology;
const PJRT_NamedValue* attributes;
size_t num_attributes;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_TopologyDescription_Attributes_Args,
num_attributes);
typedef PJRT_Error* PJRT_TopologyDescription_Attributes(
PJRT_TopologyDescription_Attributes_Args* args);
struct PJRT_Compile_Args {
size_t struct_size;
PJRT_Extension_Base* extension_start;
const PJRT_TopologyDescription* topology;
const PJRT_Program* program;
const char* compile_options;
size_t compile_options_size;
PJRT_Client* client;
PJRT_Executable* executable;
};
PJRT_DEFINE_STRUCT_TRAITS(PJRT_Compile_Args, executable);
typedef PJRT_Error* PJRT_Compile(PJRT_Compile_Args* args);
#define _PJRT_API_STRUCT_FIELD(fn_type) fn_type* fn_type
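// PJRT_Api is the function-pointer table a PJRT plugin exposes; each
// _PJRT_API_STRUCT_FIELD(Foo) entry expands to a member `Foo* Foo` pointing at
// the plugin's implementation of that function.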
typedef struct PJRT_Api {
size_t struct_size;
PJRT_Extension_Base* extension_start;
PJRT_Api_Version pjrt_api_version;
_PJRT_API_STRUCT_FIELD(PJRT_Error_Destroy);
_PJRT_API_STRUCT_FIELD(PJRT_Error_Message);
_PJRT_API_STRUCT_FIELD(PJRT_Error_GetCode);
_PJRT_API_STRUCT_FIELD(PJRT_Plugin_Initialize);
_PJRT_API_STRUCT_FIELD(PJRT_Plugin_Attributes);
_PJRT_API_STRUCT_FIELD(PJRT_Event_Destroy);
_PJRT_API_STRUCT_FIELD(PJRT_Event_IsReady);
_PJRT_API_STRUCT_FIELD(PJRT_Event_Error);
_PJRT_API_STRUCT_FIELD(PJRT_Event_Await);
_PJRT_API_STRUCT_FIELD(PJRT_Event_OnReady);
_PJRT_API_STRUCT_FIELD(PJRT_Client_Create);
_PJRT_API_STRUCT_FIELD(PJRT_Client_Destroy);
_PJRT_API_STRUCT_FIELD(PJRT_Client_PlatformName);
_PJRT_API_STRUCT_FIELD(PJRT_Client_ProcessIndex);
_PJRT_API_STRUCT_FIELD(PJRT_Client_PlatformVersion);
_PJRT_API_STRUCT_FIELD(PJRT_Client_Devices);
_PJRT_API_STRUCT_FIELD(PJRT_Client_AddressableDevices);
_PJRT_API_STRUCT_FIELD(PJRT_Client_LookupDevice);
_PJRT_API_STRUCT_FIELD(PJRT_Client_LookupAddressableDevice);
_PJRT_API_STRUCT_FIELD(PJRT_Client_AddressableMemories);
_PJRT_API_STRUCT_FIELD(PJRT_Client_Compile);
_PJRT_API_STRUCT_FIELD(PJRT_Client_DefaultDeviceAssignment);
_PJRT_API_STRUCT_FIELD(PJRT_Client_BufferFromHostBuffer);
_PJRT_API_STRUCT_FIELD(PJRT_DeviceDescription_Id);
_PJRT_API_STRUCT_FIELD(PJRT_DeviceDescription_ProcessIndex);
_PJRT_API_STRUCT_FIELD(PJRT_DeviceDescription_Attributes);
_PJRT_API_STRUCT_FIELD(PJRT_DeviceDescription_Kind);
_PJRT_API_STRUCT_FIELD(PJRT_DeviceDescription_DebugString);
_PJRT_API_STRUCT_FIELD(PJRT_DeviceDescription_ToString);
_PJRT_API_STRUCT_FIELD(PJRT_Device_GetDescription);
_PJRT_API_STRUCT_FIELD(PJRT_Device_IsAddressable);
_PJRT_API_STRUCT_FIELD(PJRT_Device_LocalHardwareId);
_PJRT_API_STRUCT_FIELD(PJRT_Device_AddressableMemories);
_PJRT_API_STRUCT_FIELD(PJRT_Device_DefaultMemory);
_PJRT_API_STRUCT_FIELD(PJRT_Device_MemoryStats);
_PJRT_API_STRUCT_FIELD(PJRT_Memory_Id);
_PJRT_API_STRUCT_FIELD(PJRT_Memory_Kind);
_PJRT_API_STRUCT_FIELD(PJRT_Memory_DebugString);
_PJRT_API_STRUCT_FIELD(PJRT_Memory_ToString);
_PJRT_API_STRUCT_FIELD(PJRT_Memory_AddressableByDevices);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_Destroy);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_Name);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_NumReplicas);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_NumPartitions);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_NumOutputs);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_SizeOfGeneratedCodeInBytes);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_GetCostAnalysis);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_OutputMemoryKinds);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_OptimizedProgram);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_Serialize);
_PJRT_API_STRUCT_FIELD(PJRT_LoadedExecutable_Destroy);
_PJRT_API_STRUCT_FIELD(PJRT_LoadedExecutable_GetExecutable);
_PJRT_API_STRUCT_FIELD(PJRT_LoadedExecutable_AddressableDevices);
_PJRT_API_STRUCT_FIELD(PJRT_LoadedExecutable_Delete);
_PJRT_API_STRUCT_FIELD(PJRT_LoadedExecutable_IsDeleted);
_PJRT_API_STRUCT_FIELD(PJRT_LoadedExecutable_Execute);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_DeserializeAndLoad);
_PJRT_API_STRUCT_FIELD(PJRT_LoadedExecutable_Fingerprint);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_Destroy);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_ElementType);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_Dimensions);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_UnpaddedDimensions);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_DynamicDimensionIndices);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_GetMemoryLayout);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_OnDeviceSizeInBytes);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_Device);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_Memory);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_Delete);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_IsDeleted);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_CopyToDevice);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_ToHostBuffer);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_IsOnCpu);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_ReadyEvent);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_UnsafePointer);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_IncreaseExternalReferenceCount);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_DecreaseExternalReferenceCount);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_OpaqueDeviceMemoryDataPointer);
_PJRT_API_STRUCT_FIELD(PJRT_CopyToDeviceStream_Destroy);
_PJRT_API_STRUCT_FIELD(PJRT_CopyToDeviceStream_AddChunk);
_PJRT_API_STRUCT_FIELD(PJRT_CopyToDeviceStream_TotalBytes);
_PJRT_API_STRUCT_FIELD(PJRT_CopyToDeviceStream_GranuleSize);
_PJRT_API_STRUCT_FIELD(PJRT_CopyToDeviceStream_CurrentBytes);
_PJRT_API_STRUCT_FIELD(PJRT_TopologyDescription_Create);
_PJRT_API_STRUCT_FIELD(PJRT_TopologyDescription_Destroy);
_PJRT_API_STRUCT_FIELD(PJRT_TopologyDescription_PlatformName);
_PJRT_API_STRUCT_FIELD(PJRT_TopologyDescription_PlatformVersion);
_PJRT_API_STRUCT_FIELD(PJRT_TopologyDescription_GetDeviceDescriptions);
_PJRT_API_STRUCT_FIELD(PJRT_TopologyDescription_Serialize);
_PJRT_API_STRUCT_FIELD(PJRT_TopologyDescription_Attributes);
_PJRT_API_STRUCT_FIELD(PJRT_Compile);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_OutputElementTypes);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_OutputDimensions);
_PJRT_API_STRUCT_FIELD(PJRT_Buffer_CopyToMemory);
_PJRT_API_STRUCT_FIELD(PJRT_Client_CreateViewOfDeviceBuffer);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_Fingerprint);
_PJRT_API_STRUCT_FIELD(PJRT_Client_TopologyDescription);
_PJRT_API_STRUCT_FIELD(PJRT_Executable_GetCompiledMemoryStats);
_PJRT_API_STRUCT_FIELD(PJRT_Memory_Kind_Id);
_PJRT_API_STRUCT_FIELD(PJRT_ExecuteContext_Create);
_PJRT_API_STRUCT_FIELD(PJRT_ExecuteContext_Destroy);
} PJRT_Api;
enum {
PJRT_Api_STRUCT_SIZE =
PJRT_STRUCT_SIZE(PJRT_Api, PJRT_Client_TopologyDescription)
};
#undef _PJRT_API_STRUCT_FIELD
#ifdef __cplusplus
}
#endif
#endif | #include "xla/pjrt/c/pjrt_c_api_test.h"
#include <cstddef>
#include <functional>
#include <memory>
#include <numeric>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/thread_annotations.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/client/executable_build_options.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/c/pjrt_c_api_helpers.h"
#include "xla/pjrt/c/pjrt_c_api_test_base.h"
#include "xla/pjrt/compile_options.pb.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/service/computation_placer.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/literal_test_util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
namespace pjrt {
namespace {
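// MHLO module that copies its f32 scalar argument and adds 1.0; used as the
// MLIR program in the Compile tests below.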
constexpr absl::string_view module_add_one =
R"(module {
func.func @main(%arg0: tensor<f32>) -> tensor<f32> {
%0 = "mhlo.copy"(%arg0) : (tensor<f32>) -> tensor<f32>
%1 = mhlo.constant dense<1.000000e+00> : tensor<f32>
%2 = mhlo.add %0, %1 : tensor<f32>
return %2 : tensor<f32>
}})";
constexpr absl::string_view kHloString =
R"(
HloModule TupleCreate_module:
ENTRY %TupleCreate.v4 (v1: f32[], v2: f32[3], v3: f32[2,3]) -> (f32[], f32[3], f32[2,3]) {
%v1 = f32[] parameter(0)
%v2 = f32[3]{0} parameter(1)
%v3 = f32[2,3]{1,0} parameter(2)
ROOT %tuple = (f32[], f32[3]{0}, f32[2,3]{1,0}) tuple(f32[] %v1, f32[3]{0} %v2, f32[2,3]{1,0} %v3)
}
)";
class TestCApiFactory {
public:
void Register(std::function<const PJRT_Api*()> factory,
absl::string_view platform_name) {
absl::MutexLock lock(&mu_);
CHECK(!factory_);
factory_ = std::move(factory);
CHECK(platform_name_.empty()) << "Platform name already provided";
CHECK(!platform_name.empty()) << "Provided platform name is empty";
platform_name_ = platform_name;
}
std::function<const PJRT_Api*()> Get() const {
absl::MutexLock lock(&mu_);
CHECK(factory_) << "Test didn't call RegisterPjRtCApiTestFactory()";
return factory_;
}
std::string GetPlatformName() const {
absl::MutexLock lock(&mu_);
CHECK(!platform_name_.empty())
<< "Test didn't call RegisterPjRtCApiTestFactory()";
return platform_name_;
}
private:
mutable absl::Mutex mu_;
std::function<const PJRT_Api*()> factory_ ABSL_GUARDED_BY(mu_);
  std::string platform_name_ ABSL_GUARDED_BY(mu_);
};
TestCApiFactory& GetGlobalTestCApiFactory() {
static auto* const factory = new TestCApiFactory;
return *factory;
}
const PJRT_Api* GetCApi() { return GetGlobalTestCApiFactory().Get()(); }
std::string GetPlatformName() {
return GetGlobalTestCApiFactory().GetPlatformName();
}
}
void RegisterPjRtCApiTestFactory(std::function<const PJRT_Api*()> factory,
absl::string_view platform_name) {
GetGlobalTestCApiFactory().Register(std::move(factory), platform_name);
}
namespace {
class PjrtCApiTest : public PjrtCApiTestBase {
protected:
PjrtCApiTest() : PjrtCApiTestBase(GetCApi()) {}
std::string platform_name_ = GetPlatformName();
};
TEST_F(PjrtCApiTest, ApiVersion) {
CHECK_EQ(api_->pjrt_api_version.major_version, PJRT_API_MAJOR);
CHECK_EQ(api_->pjrt_api_version.minor_version, PJRT_API_MINOR);
}
TEST_F(PjrtCApiTest, PlatformName) {
PJRT_Client_PlatformName_Args args;
args.client = client_;
args.struct_size = PJRT_Client_PlatformName_Args_STRUCT_SIZE;
args.extension_start = nullptr;
PJRT_Error* error = api_->PJRT_Client_PlatformName(&args);
ASSERT_EQ(error, nullptr);
absl::string_view platform_name(args.platform_name, args.platform_name_size);
ASSERT_EQ(platform_name_, platform_name);
}
TEST_F(PjrtCApiTest, ClientProcessIndex) {
PJRT_Client_ProcessIndex_Args process_index_args =
PJRT_Client_ProcessIndex_Args{
.struct_size = PJRT_Client_ProcessIndex_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
.process_index = -1,
};
PJRT_Error* error = api_->PJRT_Client_ProcessIndex(&process_index_args);
CHECK_EQ(error, nullptr);
CHECK_EQ(process_index_args.process_index, 0);
}
TEST_F(PjrtCApiTest, ClientDevices) {
absl::Span<PJRT_Device* const> devices = GetClientDevices();
ASSERT_FALSE(devices.empty());
for (auto& device : devices) {
ASSERT_TRUE(this->IsValidDeviceId(device));
}
}
TEST_F(PjrtCApiTest, ClientAddressableDevices) {
absl::Span<PJRT_Device* const> addressable_devices =
GetClientAddressableDevices();
ASSERT_FALSE(addressable_devices.empty());
for (auto& device : addressable_devices) {
ASSERT_TRUE(this->IsValidDeviceId(device));
}
absl::Span<PJRT_Device* const> client_devices = GetClientDevices();
for (auto& addressable_device : addressable_devices) {
ASSERT_THAT(client_devices, ::testing::Contains(addressable_device));
}
}
TEST_F(PjrtCApiTest, LookupDevice) {
PJRT_Client_LookupDevice_Args lookup_device_args =
PJRT_Client_LookupDevice_Args{
.struct_size = PJRT_Client_LookupDevice_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
.id = 0,
.device = nullptr,
};
PJRT_Error* lookup_device_error =
api_->PJRT_Client_LookupDevice(&lookup_device_args);
ASSERT_EQ(lookup_device_error, nullptr);
int id = GetDeviceId(lookup_device_args.device);
ASSERT_EQ(id, 0);
}
TEST_F(PjrtCApiTest, LookupAddressableDevice) {
PJRT_Client_LookupAddressableDevice_Args lookup_addressable_device_args =
PJRT_Client_LookupAddressableDevice_Args{
.struct_size = PJRT_Client_LookupAddressableDevice_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
.local_hardware_id = 0,
.addressable_device = nullptr,
};
PJRT_Error* lookup_addressable_device_error =
api_->PJRT_Client_LookupAddressableDevice(
&lookup_addressable_device_args);
ASSERT_EQ(lookup_addressable_device_error, nullptr);
int local_hardware_id =
GetLocalHardwareId(lookup_addressable_device_args.addressable_device);
ASSERT_EQ(local_hardware_id, 0);
}
TEST_F(PjrtCApiTest, GetDefaultDeviceAssignmentNominal) {
constexpr int kNumReplicas = 2;
constexpr int kNumPartitions = 1;
std::vector<int> assignment_buffer(kNumReplicas * kNumPartitions);
PJRT_Client_DefaultDeviceAssignment_Args args{
.struct_size = PJRT_Client_DefaultDeviceAssignment_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
.num_replicas = kNumReplicas,
.num_partitions = kNumPartitions,
.default_assignment_size = assignment_buffer.size(),
.default_assignment = assignment_buffer.data(),
};
auto error = ToUniquePtr(api_->PJRT_Client_DefaultDeviceAssignment(&args));
EXPECT_EQ(error, nullptr);
}
TEST_F(PjrtCApiTest, GetDefaultDeviceAssignmentBufferTooSmall) {
constexpr int kNumReplicas = 4;
constexpr int kNumPartitions = 2;
constexpr size_t kBufferSize = 7;
std::vector<int> assignment_buffer(kBufferSize);
PJRT_Client_DefaultDeviceAssignment_Args args{
.struct_size = PJRT_Client_DefaultDeviceAssignment_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
.num_replicas = kNumReplicas,
.num_partitions = kNumPartitions,
.default_assignment_size = assignment_buffer.size(),
.default_assignment = assignment_buffer.data(),
};
auto error = ToUniquePtr(api_->PJRT_Client_DefaultDeviceAssignment(&args));
ASSERT_NE(error, nullptr);
absl::Status status = ::pjrt::PjrtErrorToStatus(error.get(), api_);
EXPECT_EQ(status.code(), absl::StatusCode::kFailedPrecondition);
EXPECT_EQ(status.message(),
"PJRT_Client_DefaultDeviceAssignment: `default_assignment_size` 7"
" < `num_replicas * num_partitions`, 4 * 2 = 8");
}
TEST_F(PjrtCApiTest, LookupDeviceNegativeId) {
PJRT_Client_LookupDevice_Args args = PJRT_Client_LookupDevice_Args{
.struct_size = PJRT_Client_LookupDevice_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
.id = -1,
.device = nullptr,
};
absl::Status expected =
absl::Status(absl::StatusCode::kInvalidArgument,
"No matching device found for device_id -1");
auto error = ToUniquePtr(api_->PJRT_Client_LookupDevice(&args));
ASSERT_NE(error, nullptr);
absl::Status status = ::pjrt::PjrtErrorToStatus(error.get(), api_);
ASSERT_EQ(status, expected);
}
TEST_F(PjrtCApiTest, LookupDeviceOutOfRangeId) {
int out_of_range_id = GetNumDevices();
PJRT_Client_LookupDevice_Args args = PJRT_Client_LookupDevice_Args{
.struct_size = PJRT_Client_LookupDevice_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
.id = out_of_range_id,
.device = nullptr,
};
absl::Status expected = absl::Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("No matching device found for device_id ", out_of_range_id));
auto error = ToUniquePtr(api_->PJRT_Client_LookupDevice(&args));
ASSERT_NE(error, nullptr);
absl::Status status = ::pjrt::PjrtErrorToStatus(error.get(), api_);
ASSERT_EQ(status, expected);
}
static constexpr std::string_view kExecutableName = "operation";
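// Destroys a loaded executable through the C API and checks that no error was
// reported.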
void destroy_executable(PJRT_LoadedExecutable* executable,
const PJRT_Api* api) {
PJRT_LoadedExecutable_Destroy_Args args{
.struct_size = PJRT_LoadedExecutable_Destroy_Args_STRUCT_SIZE,
.extension_start = nullptr,
.executable = executable,
};
PJRT_Error* error = api->PJRT_LoadedExecutable_Destroy(&args);
CHECK_EQ(error, nullptr);
}
TEST_F(PjrtCApiTest, BufferTransferImmutableUntilTransferCompletes) {
xla::Shape shape = xla::ShapeUtil::MakeShapeWithType<float>({4});
std::vector<float> float_data(4);
std::iota(float_data.begin(), float_data.end(), 41.0f);
PJRT_Client_BufferFromHostBuffer_Args args = CreateBufferFromHostBufferArgs(
float_data, shape,
xla::PjRtClient::HostBufferSemantics::kImmutableUntilTransferCompletes);
PJRT_Error* error = api_->PJRT_Client_BufferFromHostBuffer(&args);
CHECK_EQ(error, nullptr);
std::unique_ptr<PJRT_Buffer, ::pjrt::PJRT_BufferDeleter> buffer(
args.buffer, ::pjrt::MakeBufferDeleter(api_));
std::unique_ptr<PJRT_Event, ::pjrt::PJRT_EventDeleter> event(
args.done_with_host_buffer, ::pjrt::MakeEventDeleter(api_));
PJRT_Event_Await_Args await_args;
await_args.struct_size = PJRT_Event_Await_Args_STRUCT_SIZE;
await_args.extension_start = nullptr;
await_args.event = event.get();
PJRT_Error* event_error = api_->PJRT_Event_Await(&await_args);
ASSERT_EQ(event_error, nullptr);
}
TEST_F(PjrtCApiTest, Compile) {
PJRT_Client_Compile_Args args = PJRT_Client_Compile_Args{
.struct_size = PJRT_Client_Compile_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
};
std::string options_str = BuildSingleDeviceCompileOptionStr();
args.compile_options = options_str.c_str();
args.compile_options_size = options_str.size();
std::string format(::pjrt::kMlirFormat);
std::string program_code{module_add_one};
PJRT_Program program = PJRT_Program{
.struct_size = PJRT_Program_STRUCT_SIZE,
.extension_start = nullptr,
.code = program_code.data(),
.code_size = program_code.length(),
.format = format.c_str(),
.format_size = format.size(),
};
args.program = &program;
PJRT_Error* error = api_->PJRT_Client_Compile(&args);
::pjrt::LogFatalIfPjrtError(error, api_);
ASSERT_EQ(error, nullptr);
destroy_executable(args.executable, api_);
}
TEST_F(PjrtCApiTest, CompileXlaComputation) {
PJRT_Client_Compile_Args args = PJRT_Client_Compile_Args{
.struct_size = PJRT_Client_Compile_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
};
xla::DeviceAssignment device_assignment(1, 1);
device_assignment(0, 0) = 0;
xla::DeviceAssignmentProto proto;
device_assignment.Serialize(&proto);
std::string device_assignment_str = proto.SerializeAsString();
std::string options_str = BuildSingleDeviceCompileOptionStr();
args.compile_options = options_str.c_str();
args.compile_options_size = options_str.size();
absl::StatusOr<std::unique_ptr<xla::HloModule>> hlo_module =
xla::ParseAndReturnUnverifiedModule(kHloString);
ASSERT_EQ(hlo_module.ok(), true);
std::string module_str = hlo_module->get()->ToProto().SerializeAsString();
std::string format(::pjrt::kHloFormat);
PJRT_Program program = PJRT_Program{
.struct_size = PJRT_Program_STRUCT_SIZE,
.extension_start = nullptr,
.code = module_str.data(),
.code_size = module_str.size(),
.format = format.c_str(),
.format_size = format.size(),
};
args.program = &program;
PJRT_Error* error = api_->PJRT_Client_Compile(&args);
::pjrt::LogFatalIfPjrtError(error, api_);
ASSERT_EQ(error, nullptr);
destroy_executable(args.executable, api_);
}
TEST_F(PjrtCApiTest, CompileInvalidOption) {
PJRT_Client_Compile_Args args = PJRT_Client_Compile_Args{
.struct_size = PJRT_Client_Compile_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
};
std::string options_str = "invalid compile options";
args.compile_options = options_str.c_str();
args.compile_options_size = options_str.size();
std::string format(::pjrt::kMlirFormat);
std::string program_code{module_add_one};
PJRT_Program program = PJRT_Program{
.struct_size = PJRT_Program_STRUCT_SIZE,
.extension_start = nullptr,
.code = program_code.data(),
.code_size = program_code.length(),
.format = format.c_str(),
.format_size = format.size(),
};
args.program = &program;
PJRT_Error* error = api_->PJRT_Client_Compile(&args);
absl::Status status = ::pjrt::PjrtErrorToStatus(error, api_);
EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument);
EXPECT_EQ(status.message(),
"PJRT_Client_Compile: failed to deserialize CompileOptionsProto");
destroy_executable(args.executable, api_);
::pjrt::MakeErrorDeleter(api_)(error);
}
TEST_F(PjrtCApiTest, CompileInvalidProgramFormat) {
PJRT_Client_Compile_Args args = PJRT_Client_Compile_Args{
.struct_size = PJRT_Client_Compile_Args_STRUCT_SIZE,
.extension_start = nullptr,
.client = client_,
};
xla::DeviceAssignment device_assignment(1, 1);
device_assignment(0, 0) = 0;
xla::DeviceAssignmentProto proto;
device_assignment.Serialize(&proto);
std::string device_assignment_str = proto.SerializeAsString();
std::string options_str = BuildSingleDeviceCompileOptionStr();
args.compile_options = options_str.c_str();
args.compile_options_size = options_str.size();
std::string format("invalid");
PJRT_Program program = PJRT_Program{
.struct_size = PJRT_Program_STRUCT_SIZE,
.extension_start = nullptr,
.code = nullptr,
.code_size = 0,
.format = format.c_str(),
.format_size = format.size(),
};
args.program = &program;
PJRT_Error* error = api_->PJRT_Client_Compile(&args);
absl::Status status = ::pjrt::PjrtErrorToStatus(error, api_);
EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument);
EXPECT_EQ(status.message(), "Unknown program format 'invalid'.");
destroy_executable(args.executable, api_);
::pjrt::MakeErrorDeleter(api_)(error);
}
TEST_F(PjrtCApiTest, DeviceId) {
auto* device = GetClientDevices()[0];
int id = GetDeviceId(device);
CHECK_EQ(id, 0);
}
TEST_F(PjrtCApiTest, DeviceProcessIndex) {
PJRT_DeviceDescription_ProcessIndex_Args args =
PJRT_DeviceDescription_ProcessIndex_Args{
.struct_size = PJRT_DeviceDescription_ProcessIndex_Args_STRUCT_SIZE,
.extension_start = nullptr,
.device_description =
::pjrt::GetDeviceDescription(api_, GetClientDevices()[0]),
.process_index = -1,
};
PJRT_Error* error = api_->PJRT_DeviceDescription_ProcessIndex(&args);
ASSERT_EQ(error, nullptr);
CHECK_EQ(args.process_index, 0);
}
TEST_F(PjrtCApiTest, DeviceIsAddressable) {
PJRT_Device_IsAddressable_Args args = PJRT_Device_IsAddressable_Args{
.struct_size = PJRT_Device_IsAddressable_Args_STRUCT_SIZE,
.extension_start = nullptr,
.device = GetClientDevices()[0],
.is_addressable = false,
};
PJRT_Error* error = api_->PJRT_Device_IsAddressable(&args);
ASSERT_EQ(error, nullptr);
CHECK_EQ(args.is_addressable, true);
}
TEST_F(PjrtCApiTest, DeviceLocalHardwareId) {
PJRT_Device_LocalHardwareId_Args args = PJRT_Device_LocalHardwareId_Args{
.struct_size = PJRT_Device_LocalHardwareId_Args_STRUCT_SIZE,
.extension_start = nullptr,
.device = GetClientDevices()[0],
.local_hardware_id = -1,
};
PJRT_Error* error = api_->PJRT_Device_LocalHardwareId(&args);
ASSERT_EQ(error, nullptr);
CHECK_EQ(args.local_hardware_id, 0);
}
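// Fixture for buffer tests: SetUp() creates a device buffer together with its
// host-to-device transfer event, and TearDown() waits for that transfer before
// destroying the buffer.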
class PjrtCApiBufferTest : public PjrtCApiTest {
protected:
void SetUp() override {
PjrtCApiTest::SetUp();
auto buffer_and_event = create_buffer();
buffer_ = std::move(buffer_and_event.first);
event_ = buffer_and_event.second;
}
void TearDown() override {
TF_CHECK_OK(event_.Await());
buffer_.reset(nullptr);
PjrtCApiTest::TearDown();
}
std::unique_ptr<PJRT_Buffer, ::pjrt::PJRT_BufferDeleter> buffer_;
xla::PjRtFuture<> event_;
};
TEST_F(PjrtCApiBufferTest, IsDeleted) {
PJRT_Buffer_IsDeleted_Args is_deleted_args;
is_deleted_args.struct_size = PJRT_Buffer_IsDeleted_Args_STRUCT_SIZE;
is_deleted_args.extension_start = nullptr;
is_deleted_args.buffer = buffer_.get();
PJRT_Error* is_deleted_error = api_->PJRT_Buffer_IsDeleted(&is_deleted_args);
ASSERT_EQ(is_deleted_error, nullptr);
ASSERT_FALSE(is_deleted_args.is_deleted);
PJRT_Buffer_Delete_Args delete_args;
delete_args.struct_size = PJRT_Buffer_Delete_Args_STRUCT_SIZE;
delete_args.extension_start = nullptr;
delete_args.buffer = buffer_.get();
PJRT_Error* delete_error = api_->PJRT_Buffer_Delete(&delete_args);
ASSERT_EQ(delete_error, nullptr);
is_deleted_error = api_->PJRT_Buffer_IsDeleted(&is_deleted_args);
ASSERT_EQ(is_deleted_error, nullptr);
ASSERT_TRUE(is_deleted_args.is_deleted);
}
TEST_F(PjrtCApiBufferTest, GetOnDeviceSizeInBytes) {
PJRT_Buffer_OnDeviceSizeInBytes_Args args;
args.struct_size = PJRT_Buffer_OnDeviceSizeInBytes_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
PJRT_Error* on_device_size_bytes_error =
api_->PJRT_Buffer_OnDeviceSizeInBytes(&args);
ASSERT_EQ(on_device_size_bytes_error, nullptr);
ASSERT_GT(args.on_device_size_in_bytes, 0);
}
TEST_F(PjrtCApiBufferTest, ReadyEvent) {
PJRT_Buffer_ReadyEvent_Args get_event_args;
get_event_args.struct_size = PJRT_Buffer_ReadyEvent_Args_STRUCT_SIZE;
get_event_args.extension_start = nullptr;
get_event_args.buffer = buffer_.get();
auto error = ToUniquePtr(api_->PJRT_Buffer_ReadyEvent(&get_event_args));
ASSERT_EQ(error, nullptr);
PJRT_Event* event = get_event_args.event;
ASSERT_NE(event, nullptr);
PJRT_Event_Await_Args await_args;
await_args.struct_size = PJRT_Event_Await_Args_STRUCT_SIZE;
await_args.extension_start = nullptr;
await_args.event = event;
error.reset(api_->PJRT_Event_Await(&await_args));
ASSERT_EQ(error, nullptr);
PJRT_Event_IsReady_Args ready_args;
ready_args.struct_size = PJRT_Event_IsReady_Args_STRUCT_SIZE;
ready_args.extension_start = nullptr;
ready_args.event = event;
error.reset(api_->PJRT_Event_IsReady(&ready_args));
ASSERT_EQ(error, nullptr);
EXPECT_TRUE(ready_args.is_ready);
PJRT_Event_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_Event_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.event = event;
error.reset(api_->PJRT_Event_Destroy(&destroy_args));
EXPECT_EQ(error, nullptr);
}
TEST_F(PjrtCApiBufferTest, ToHostBufferNoHostLayout) {
PJRT_Buffer_ToHostBuffer_Args args;
args.struct_size = PJRT_Buffer_ToHostBuffer_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.src = buffer_.get();
xla::Shape host_shape = xla::ShapeUtil::MakeShape(xla::F32, {4});
auto literal = std::make_shared<xla::Literal>(host_shape);
args.host_layout = nullptr;
args.dst = literal->untyped_data();
args.dst_size = xla::ShapeUtil::ByteSizeOfElements(host_shape);
args.event = nullptr;
PJRT_Error* error = api_->PJRT_Buffer_ToHostBuffer(&args);
xla::PjRtFuture<> transfer_to_host =
::pjrt::ConvertCEventToCppFuture(args.event, api_);
TF_CHECK_OK(transfer_to_host.Await());
EXPECT_EQ(error, nullptr);
ASSERT_EQ(literal->data<float>().size(), 4);
std::vector<float> float_data(4);
std::iota(float_data.begin(), float_data.end(), 41.0f);
EXPECT_TRUE(xla::LiteralTestUtil::Equal(
xla::LiteralUtil::CreateR1<float>(float_data), *literal));
}
TEST_F(PjrtCApiBufferTest, IncreaseAndDecreaseReferenceCount) {
PJRT_Buffer_IncreaseExternalReferenceCount_Args increase_reference_count_args;
increase_reference_count_args.struct_size =
PJRT_Buffer_IncreaseExternalReferenceCount_Args_STRUCT_SIZE;
increase_reference_count_args.extension_start = nullptr;
increase_reference_count_args.buffer = buffer_.get();
PJRT_Error* increase_reference_count_error =
api_->PJRT_Buffer_IncreaseExternalReferenceCount(
&increase_reference_count_args);
EXPECT_EQ(increase_reference_count_error, nullptr);
PJRT_Buffer_DecreaseExternalReferenceCount_Args decrease_reference_count_args;
decrease_reference_count_args.struct_size =
PJRT_Buffer_DecreaseExternalReferenceCount_Args_STRUCT_SIZE;
decrease_reference_count_args.extension_start = nullptr;
decrease_reference_count_args.buffer = buffer_.get();
PJRT_Error* decrease_reference_error =
api_->PJRT_Buffer_DecreaseExternalReferenceCount(
&decrease_reference_count_args);
EXPECT_EQ(decrease_reference_error, nullptr);
}
TEST_F(PjrtCApiBufferTest, DecreaseReferenceCountReturnsError) {
PJRT_Buffer_DecreaseExternalReferenceCount_Args args;
args.struct_size =
PJRT_Buffer_DecreaseExternalReferenceCount_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
auto error =
ToUniquePtr(api_->PJRT_Buffer_DecreaseExternalReferenceCount(&args));
ASSERT_NE(error, nullptr);
absl::Status status = ::pjrt::PjrtErrorToStatus(error.get(), api_);
EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument);
EXPECT_EQ(status.message(),
"Attempting to decrease reference on a buffer with zero reference "
"count.");
}
TEST_F(PjrtCApiBufferTest, OpaqueDeviceMemoryDataPointer) {
PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args args;
args.struct_size = PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
PJRT_Error* error = api_->PJRT_Buffer_OpaqueDeviceMemoryDataPointer(&args);
EXPECT_EQ(error, nullptr);
EXPECT_NE(args.device_memory_ptr, nullptr);
}
class PjrtCommonCApiHelpersTest : public PjrtCApiTest {};
TEST_F(PjrtCommonCApiHelpersTest, PjrtErrorToStatus) {
EXPECT_TRUE(::pjrt::PjrtErrorToStatus(nullptr, api_).ok());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/c/pjrt_c_api.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/c/pjrt_c_api_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
82311f9f-96cf-4304-b0ea-6fd681e6896c | cpp | tensorflow/tensorflow | cpu_backend_threadpool | tensorflow/lite/kernels/cpu_backend_threadpool.h | tensorflow/lite/kernels/cpu_backend_threadpool_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_CPU_BACKEND_THREADPOOL_H_
#define TENSORFLOW_LITE_KERNELS_CPU_BACKEND_THREADPOOL_H_
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#ifdef TFLITE_WITH_RUY
#include "ruy/context.h"
#include "ruy/thread_pool.h"
#else
#include "public/gemmlowp.h"
#endif
namespace tflite {
namespace cpu_backend_threadpool {
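// Executes `tasks_count` tasks on the thread pool owned by the given
// CpuBackendContext, dispatching to Ruy when TFLITE_WITH_RUY is defined and to
// gemmlowp otherwise; both implementations block until all tasks finish.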
#ifdef TFLITE_WITH_RUY
using Task = ruy::Task;
template <typename TaskType>
void Execute(int tasks_count, TaskType* tasks,
CpuBackendContext* cpu_backend_context) {
TFLITE_DCHECK_LE(tasks_count, cpu_backend_context->max_num_threads());
cpu_backend_context->ruy_context()->mutable_thread_pool()->Execute(
tasks_count, tasks);
}
#else
using Task = gemmlowp::Task;
template <typename TaskType>
void Execute(int tasks_count, TaskType* tasks,
CpuBackendContext* cpu_backend_context) {
TFLITE_DCHECK_LE(tasks_count, cpu_backend_context->max_num_threads());
cpu_backend_context->gemmlowp_context()->workers_pool()->Execute(tasks_count,
tasks);
}
#endif
}
}
#endif | #include "tensorflow/lite/kernels/cpu_backend_threadpool.h"
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/cpu_backend_context.h"
namespace tflite {
namespace {
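// Task that writes each index i in [start_, end_) into buffer_[i]; the test
// splits the array across threads and then verifies full coverage.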
class TestGenerateArrayOfIncrementingIntsTask
: public cpu_backend_threadpool::Task {
public:
TestGenerateArrayOfIncrementingIntsTask(int* buffer, int start, int end)
: buffer_(buffer), start_(start), end_(end) {}
void Run() override {
for (int i = start_; i < end_; i++) {
buffer_[i] = i;
}
}
private:
int* buffer_;
int start_;
int end_;
};
void TestGenerateArrayOfIncrementingInts(int num_threads, int size) {
std::vector<int> buffer(size);
std::vector<TestGenerateArrayOfIncrementingIntsTask> tasks;
int rough_size_per_thread = size / num_threads;
int start = 0;
for (int thread = 0; thread < num_threads; thread++) {
int end = start + rough_size_per_thread;
if (thread == num_threads - 1) {
end = size;
}
tasks.emplace_back(buffer.data(), start, end);
start = end;
}
ASSERT_EQ(num_threads, tasks.size());
CpuBackendContext context;
context.SetMaxNumThreads(num_threads);
cpu_backend_threadpool::Execute(tasks.size(), tasks.data(), &context);
for (int i = 0; i < size; i++) {
ASSERT_EQ(buffer[i], i);
}
}
TEST(CpuBackendThreadpoolTest, OneThreadSize100) {
TestGenerateArrayOfIncrementingInts(1, 100);
}
TEST(CpuBackendThreadpoolTest, ThreeThreadsSize1000000) {
TestGenerateArrayOfIncrementingInts(3, 1000000);
}
TEST(CpuBackendThreadpoolTest, TenThreadsSize1234567) {
TestGenerateArrayOfIncrementingInts(10, 1234567);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/cpu_backend_threadpool.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/cpu_backend_threadpool_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cf625149-027c-4753-b82c-cd10c5a4c99b | cpp | tensorflow/tensorflow | affine_map_evaluator | third_party/xla/xla/service/gpu/model/affine_map_evaluator.cc | third_party/xla/xla/service/gpu/model/affine_map_evaluator_test.cc | #include "xla/service/gpu/model/affine_map_evaluator.h"
#include <cstdint>
#include <vector>
#include "absl/types/span.h"
#include "llvm/Support/MathExtras.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/Support/LLVM.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
namespace {
using llvm::SmallVector;
using mlir::AffineBinaryOpExpr;
using mlir::AffineConstantExpr;
using mlir::AffineDimExpr;
using mlir::AffineExpr;
using mlir::AffineExprKind;
using mlir::AffineMap;
using mlir::AffineSymbolExpr;
}
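// Recursively evaluates an affine expression by substituting the given
// dimension and symbol values; floor division rounds toward negative infinity.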
int64_t EvaluateAffineExpr(AffineExpr expr,
absl::Span<int64_t const> dim_values,
absl::Span<int64_t const> symbol_values) {
AffineExprKind kind = expr.getKind();
if (kind == AffineExprKind::Constant) {
return mlir::cast<AffineConstantExpr>(expr).getValue();
}
if (kind == AffineExprKind::DimId) {
return dim_values[mlir::cast<AffineDimExpr>(expr).getPosition()];
}
if (kind == AffineExprKind::SymbolId) {
return symbol_values[mlir::cast<AffineSymbolExpr>(expr).getPosition()];
}
auto binary_expr = mlir::cast<AffineBinaryOpExpr>(expr);
int64_t lhs =
EvaluateAffineExpr(binary_expr.getLHS(), dim_values, symbol_values);
int64_t rhs =
EvaluateAffineExpr(binary_expr.getRHS(), dim_values, symbol_values);
switch (kind) {
case AffineExprKind::Add:
return lhs + rhs;
case AffineExprKind::Mul:
return lhs * rhs;
case AffineExprKind::FloorDiv:
return llvm::divideFloorSigned(lhs, rhs);
case AffineExprKind::Mod:
return lhs % rhs;
default:
LOG(FATAL) << "Unsupported expression";
}
}
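// Evaluates every result expression of `affine_map` at the given point; the
// number of dim and symbol values must match the map's counts.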
SmallVector<int64_t> EvaluateAffineMap(
AffineMap affine_map, absl::Span<int64_t const> dim_values,
absl::Span<int64_t const> symbol_values) {
CHECK_EQ(affine_map.getNumDims(), dim_values.size());
CHECK_EQ(affine_map.getNumSymbols(), symbol_values.size());
SmallVector<int64_t> results;
results.reserve(affine_map.getNumResults());
for (auto expr : affine_map.getResults()) {
results.push_back(EvaluateAffineExpr(expr, dim_values, symbol_values));
}
return results;
}
}
} | #include "xla/service/gpu/model/affine_map_evaluator.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ::mlir::AffineExpr;
using ::mlir::AffineMap;
using ::mlir::bindDims;
using ::mlir::bindSymbols;
using ::testing::ElementsAre;
class AffineMapEvaluator : public HloTestBase {
public:
mlir::MLIRContext mlir_context_;
};
TEST_F(AffineMapEvaluator, EvaluateMap) {
AffineExpr d0, d1, s0, s1;
bindDims(&mlir_context_, d0, d1);
bindSymbols(&mlir_context_, s0, s1);
auto affine_map =
AffineMap::get(2, 2, {d0 + d1.floorDiv(8), s0 + s1 % 16}, &mlir_context_);
auto res = EvaluateAffineMap(affine_map, {1, 2},
{3, 4});
EXPECT_THAT(res, ElementsAre(1, 7));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/affine_map_evaluator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/affine_map_evaluator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2c67b126-fa63-44ac-8f6f-a4423e067344 | cpp | google/cel-cpp | legacy_type_provider | eval/public/structs/legacy_type_provider.cc | eval/public/structs/legacy_type_provider_test.cc | #include "eval/public/structs/legacy_type_provider.h"
#include <cstdint>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "absl/types/optional.h"
#include "common/any.h"
#include "common/legacy_value.h"
#include "common/memory.h"
#include "common/type.h"
#include "common/value.h"
#include "common/value_factory.h"
#include "eval/public/message_wrapper.h"
#include "eval/public/structs/legacy_type_adapter.h"
#include "eval/public/structs/legacy_type_info_apis.h"
#include "extensions/protobuf/memory_manager.h"
#include "internal/status_macros.h"
namespace google::api::expr::runtime {
namespace {
using google::api::expr::runtime::LegacyTypeAdapter;
using google::api::expr::runtime::MessageWrapper;
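// Bridges the modern cel::StructValueBuilder interface onto the legacy
// LegacyTypeMutationApis so that struct values for legacy types can be built
// through the new value APIs.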
class LegacyStructValueBuilder final : public cel::StructValueBuilder {
public:
LegacyStructValueBuilder(cel::MemoryManagerRef memory_manager,
LegacyTypeAdapter adapter,
MessageWrapper::Builder builder)
: memory_manager_(memory_manager),
adapter_(adapter),
builder_(std::move(builder)) {}
absl::Status SetFieldByName(absl::string_view name,
cel::Value value) override {
CEL_ASSIGN_OR_RETURN(
auto legacy_value,
LegacyValue(cel::extensions::ProtoMemoryManagerArena(memory_manager_),
value));
return adapter_.mutation_apis()->SetField(name, legacy_value,
memory_manager_, builder_);
}
absl::Status SetFieldByNumber(int64_t number, cel::Value value) override {
CEL_ASSIGN_OR_RETURN(
auto legacy_value,
LegacyValue(cel::extensions::ProtoMemoryManagerArena(memory_manager_),
value));
return adapter_.mutation_apis()->SetFieldByNumber(
number, legacy_value, memory_manager_, builder_);
}
absl::StatusOr<cel::StructValue> Build() && override {
CEL_ASSIGN_OR_RETURN(auto message,
adapter_.mutation_apis()->AdaptFromWellKnownType(
memory_manager_, std::move(builder_)));
if (!message.IsMessage()) {
return absl::FailedPreconditionError("expected MessageWrapper");
}
auto message_wrapper = message.MessageWrapperOrDie();
return cel::common_internal::LegacyStructValue{
reinterpret_cast<uintptr_t>(message_wrapper.message_ptr()) |
(message_wrapper.HasFullProto()
? cel::base_internal::kMessageWrapperTagMessageValue
: uintptr_t{0}),
reinterpret_cast<uintptr_t>(message_wrapper.legacy_type_info())};
}
private:
cel::MemoryManagerRef memory_manager_;
LegacyTypeAdapter adapter_;
MessageWrapper::Builder builder_;
};
}
absl::StatusOr<absl::optional<cel::Unique<cel::StructValueBuilder>>>
LegacyTypeProvider::NewStructValueBuilder(cel::ValueFactory& value_factory,
const cel::StructType& type) const {
if (auto type_adapter = ProvideLegacyType(type.name());
type_adapter.has_value()) {
const auto* mutation_apis = type_adapter->mutation_apis();
if (mutation_apis == nullptr) {
return absl::FailedPreconditionError(absl::StrCat(
"LegacyTypeMutationApis missing for type: ", type.name()));
}
CEL_ASSIGN_OR_RETURN(auto builder, mutation_apis->NewInstance(
value_factory.GetMemoryManager()));
return value_factory.GetMemoryManager()
.MakeUnique<LegacyStructValueBuilder>(value_factory.GetMemoryManager(),
*type_adapter,
std::move(builder));
}
return absl::nullopt;
}
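// Deserializes a protobuf Any payload for `type_url` by stripping the
// well-known prefix (cel::kTypeGoogleApisComPrefix, presumably
// "type.googleapis.com/"), parsing the bytes into a freshly built legacy
// message, and adapting the result back into a modern cel::Value.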
absl::StatusOr<absl::optional<cel::Value>>
LegacyTypeProvider::DeserializeValueImpl(cel::ValueFactory& value_factory,
absl::string_view type_url,
const absl::Cord& value) const {
auto type_name = absl::StripPrefix(type_url, cel::kTypeGoogleApisComPrefix);
if (auto type_info = ProvideLegacyTypeInfo(type_name);
type_info.has_value()) {
if (auto type_adapter = ProvideLegacyType(type_name);
type_adapter.has_value()) {
const auto* mutation_apis = type_adapter->mutation_apis();
if (mutation_apis == nullptr) {
return absl::FailedPreconditionError(absl::StrCat(
"LegacyTypeMutationApis missing for type: ", type_name));
}
CEL_ASSIGN_OR_RETURN(auto builder, mutation_apis->NewInstance(
value_factory.GetMemoryManager()));
if (!builder.message_ptr()->ParsePartialFromCord(value)) {
return absl::UnknownError("failed to parse protocol buffer message");
}
CEL_ASSIGN_OR_RETURN(
auto legacy_value,
mutation_apis->AdaptFromWellKnownType(
value_factory.GetMemoryManager(), std::move(builder)));
cel::Value modern_value;
CEL_RETURN_IF_ERROR(ModernValue(cel::extensions::ProtoMemoryManagerArena(
value_factory.GetMemoryManager()),
legacy_value, modern_value));
return modern_value;
}
}
return absl::nullopt;
}
absl::StatusOr<absl::optional<cel::Type>> LegacyTypeProvider::FindTypeImpl(
cel::TypeFactory& type_factory, absl::string_view name) const {
if (auto type_info = ProvideLegacyTypeInfo(name); type_info.has_value()) {
const auto* descriptor = (*type_info)->GetDescriptor(MessageWrapper());
if (descriptor != nullptr) {
return cel::MessageType(descriptor);
}
return cel::common_internal::MakeBasicStructType(
(*type_info)->GetTypename(MessageWrapper()));
}
return absl::nullopt;
}
absl::StatusOr<absl::optional<cel::StructTypeField>>
LegacyTypeProvider::FindStructTypeFieldByNameImpl(
cel::TypeFactory& type_factory, absl::string_view type,
absl::string_view name) const {
if (auto type_info = ProvideLegacyTypeInfo(type); type_info.has_value()) {
if (auto field_desc = (*type_info)->FindFieldByName(name);
field_desc.has_value()) {
return cel::common_internal::BasicStructTypeField(
field_desc->name, field_desc->number, cel::DynType{});
} else {
const auto* mutation_apis =
(*type_info)->GetMutationApis(MessageWrapper());
if (mutation_apis == nullptr || !mutation_apis->DefinesField(name)) {
return absl::nullopt;
}
return cel::common_internal::BasicStructTypeField(name, 0,
cel::DynType{});
}
}
return absl::nullopt;
}
} | #include "eval/public/structs/legacy_type_provider.h"
#include <optional>
#include <string>
#include "absl/strings/string_view.h"
#include "eval/public/structs/legacy_type_info_apis.h"
#include "internal/testing.h"
namespace google::api::expr::runtime {
namespace {
class LegacyTypeProviderTestEmpty : public LegacyTypeProvider {
public:
absl::optional<LegacyTypeAdapter> ProvideLegacyType(
absl::string_view name) const override {
return absl::nullopt;
}
};
class LegacyTypeInfoApisEmpty : public LegacyTypeInfoApis {
public:
std::string DebugString(
const MessageWrapper& wrapped_message) const override {
return "";
}
absl::string_view GetTypename(
const MessageWrapper& wrapped_message) const override {
return test_string_;
}
const LegacyTypeAccessApis* GetAccessApis(
const MessageWrapper& wrapped_message) const override {
return nullptr;
}
private:
const std::string test_string_ = "test";
};
class LegacyTypeProviderTestImpl : public LegacyTypeProvider {
public:
explicit LegacyTypeProviderTestImpl(const LegacyTypeInfoApis* test_type_info)
: test_type_info_(test_type_info) {}
absl::optional<LegacyTypeAdapter> ProvideLegacyType(
absl::string_view name) const override {
if (name == "test") {
return LegacyTypeAdapter(nullptr, nullptr);
}
return absl::nullopt;
}
absl::optional<const LegacyTypeInfoApis*> ProvideLegacyTypeInfo(
absl::string_view name) const override {
if (name == "test") {
return test_type_info_;
}
return absl::nullopt;
}
private:
const LegacyTypeInfoApis* test_type_info_ = nullptr;
};
TEST(LegacyTypeProviderTest, EmptyTypeProviderHasProvideTypeInfo) {
LegacyTypeProviderTestEmpty provider;
EXPECT_EQ(provider.ProvideLegacyType("test"), absl::nullopt);
EXPECT_EQ(provider.ProvideLegacyTypeInfo("test"), absl::nullopt);
}
TEST(LegacyTypeProviderTest, NonEmptyTypeProviderProvidesSomeTypes) {
LegacyTypeInfoApisEmpty test_type_info;
LegacyTypeProviderTestImpl provider(&test_type_info);
EXPECT_TRUE(provider.ProvideLegacyType("test").has_value());
EXPECT_TRUE(provider.ProvideLegacyTypeInfo("test").has_value());
EXPECT_EQ(provider.ProvideLegacyType("other"), absl::nullopt);
EXPECT_EQ(provider.ProvideLegacyTypeInfo("other"), absl::nullopt);
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/structs/legacy_type_provider.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/structs/legacy_type_provider_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
8ec5cebe-660a-40ef-881c-5670d88f4be5 | cpp | tensorflow/tensorflow | prng | third_party/xla/xla/hlo/builder/lib/prng.cc | third_party/xla/xla/hlo/builder/lib/prng_test.cc | #include "xla/hlo/builder/lib/prng.h"
#include <array>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
xla::XlaOp ConcatScalars(xla::XlaBuilder* builder,
absl::Span<const xla::XlaOp> scalars) {
std::vector<xla::XlaOp> vectors;
absl::c_transform(scalars, std::back_inserter(vectors),
[](xla::XlaOp x) { return xla::Reshape(x, {1}); });
return ConcatInDim(builder, vectors, 0);
}
namespace {
XlaOp RotateLeftU32(XlaOp v, int distance) {
return (v << ConstantR0<uint32_t>(v.builder(), distance)) |
ShiftRightLogical(v, ConstantR0<uint32_t>(v.builder(), 32 - distance));
}
using ThreeFry2x32State = std::array<XlaOp, 2>;
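// ThreeFry2x32: a counter-based generator that mixes a 2x32-bit input counter
// with a 2x32-bit key over 20 add/rotate/xor rounds (five groups of four),
// injecting rotated key words between groups.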
ThreeFry2x32State ThreeFry2x32(ThreeFry2x32State input, ThreeFry2x32State key) {
XlaBuilder* builder = input[0].builder();
key[0] = BitcastConvertType(key[0], U32);
key[1] = BitcastConvertType(key[1], U32);
constexpr std::array<int, 8> rotations = {13, 15, 26, 6, 17, 29, 16, 24};
ThreeFry2x32State x;
std::array<XlaOp, 3> ks;
ks[2] = ConstantR0<uint32_t>(builder, 0x1BD11BDA);
for (int i = 0; i < 2; ++i) {
ks[i] = key[i];
x[i] = input[i];
ks[2] = ks[2] ^ key[i];
}
x[0] = x[0] + ks[0];
x[1] = x[1] + ks[1];
auto round = [](ThreeFry2x32State v, int rotation) {
v[0] = v[0] + v[1];
v[1] = RotateLeftU32(v[1], rotation);
v[1] = v[0] ^ v[1];
return v;
};
x = round(x, rotations[0]);
x = round(x, rotations[1]);
x = round(x, rotations[2]);
x = round(x, rotations[3]);
x[0] = x[0] + ks[1];
x[1] = x[1] + ks[2] + ConstantR0<uint32_t>(builder, 1);
x = round(x, rotations[4]);
x = round(x, rotations[5]);
x = round(x, rotations[6]);
x = round(x, rotations[7]);
x[0] = x[0] + ks[2];
x[1] = x[1] + ks[0] + ConstantR0<uint32_t>(builder, 2);
x = round(x, rotations[0]);
x = round(x, rotations[1]);
x = round(x, rotations[2]);
x = round(x, rotations[3]);
x[0] = x[0] + ks[0];
x[1] = x[1] + ks[1] + ConstantR0<uint32_t>(builder, 3);
x = round(x, rotations[4]);
x = round(x, rotations[5]);
x = round(x, rotations[6]);
x = round(x, rotations[7]);
x[0] = x[0] + ks[1];
x[1] = x[1] + ks[2] + ConstantR0<uint32_t>(builder, 4);
x = round(x, rotations[0]);
x = round(x, rotations[1]);
x = round(x, rotations[2]);
x = round(x, rotations[3]);
x[0] = x[0] + ks[2];
x[1] = x[1] + ks[0] + ConstantR0<uint32_t>(builder, 5);
return x;
}
std::array<XlaOp, 2> Uint64ToUint32s(XlaOp u64) {
XlaBuilder* builder = u64.builder();
XlaOp const32 = ConstantR0WithType(builder, U64, 32);
XlaOp fst = ConvertElementType(u64, U32);
XlaOp snd = ConvertElementType(ShiftRightLogical(u64, const32), U32);
return {fst, snd};
}
XlaOp Uint32sToUint64(std::array<XlaOp, 2> u32s) {
XlaBuilder* builder = u32s[0].builder();
return ConvertElementType(u32s[0], U64) |
ShiftLeft(ConvertElementType(u32s[1], U64),
ConstantR0WithType(builder, U64, 32));
}
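// Builds the per-element 64-bit counters fed to ThreeFry: broadcasts the
// scalar state over `shape`, adds a row-major linear index via Iota (skipping
// dimensions of size < 2, which contribute nothing to the index), and returns
// the counters split into 32-bit halves along with the state advanced by the
// element count.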
std::pair<ThreeFry2x32State, XlaOp> GetThreeFryInputsAndUpdatedState(
XlaOp initial_state, const Shape& shape) {
XlaBuilder* builder = initial_state.builder();
auto u64_shape = ShapeUtil::MakeShape(U64, shape.dimensions());
auto input_u64 = Broadcast(Reshape(initial_state, {}), shape.dimensions());
int64_t trailing_dims_product = 1;
for (int64_t i = shape.rank() - 1; i >= 0; --i) {
if (shape.dimensions(i) < 2) {
continue;
}
input_u64 =
input_u64 + (Iota(builder, u64_shape, i) *
ConstantR0<uint64_t>(builder, trailing_dims_product));
trailing_dims_product *= shape.dimensions(i);
}
XlaOp new_state = initial_state +
ConstantR0<uint64_t>(builder, ShapeUtil::ElementsIn(shape));
return std::make_pair(Uint64ToUint32s(input_u64), new_state);
}
struct SplitShapePair {
Shape half_shape;
Shape concat_shape;
int64_t split_dim;
int64_t new_concat_dim;
};
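// Picks a dimension to split so the output can be generated as two halves:
// prefer the first even-sized dimension, otherwise the largest one.
// `half_shape` describes one half (ceil(dim / 2), with a size-1 axis inserted
// after the split dimension) and `concat_shape` the two halves concatenated
// along that new axis (`new_concat_dim`).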
SplitShapePair SplitShapeIntoHalves(const Shape& shape) {
SplitShapePair pair;
if (shape.rank() == 0) {
pair.half_shape = ShapeUtil::MakeShape(shape.element_type(), {1});
pair.concat_shape = ShapeUtil::MakeShape(shape.element_type(), {2});
pair.split_dim = 0;
pair.new_concat_dim = 0;
return pair;
}
pair.split_dim = -1;
for (int64_t i = 0; i < shape.rank(); ++i) {
if (shape.dimensions(i) % 2 == 0) {
pair.split_dim = i;
break;
}
}
if (pair.split_dim == -1) {
for (int64_t i = 0; i < shape.rank(); ++i) {
if (pair.split_dim == -1 ||
shape.dimensions(i) > shape.dimensions(pair.split_dim)) {
pair.split_dim = i;
}
}
}
if (pair.split_dim < 0) {
LOG(ERROR) << "This point shouldn't have been reached.";
}
std::vector<int64_t> half_shape_dims;
std::vector<int64_t> concat_shape_dims;
const auto rank = shape.rank();
half_shape_dims.reserve(rank + 1);
concat_shape_dims.reserve(rank + 1);
for (int64_t i = 0; i < rank; ++i) {
if (i == pair.split_dim) {
half_shape_dims.push_back(CeilOfRatio<int64_t>(shape.dimensions(i), 2));
half_shape_dims.push_back(1);
concat_shape_dims.push_back(half_shape_dims[i]);
concat_shape_dims.push_back(2);
} else {
half_shape_dims.push_back(shape.dimensions(i));
concat_shape_dims.push_back(shape.dimensions(i));
}
}
pair.new_concat_dim = pair.split_dim + 1;
pair.half_shape = ShapeUtil::MakeShape(shape.element_type(), half_shape_dims);
pair.concat_shape =
ShapeUtil::MakeShape(shape.element_type(), concat_shape_dims);
return pair;
}
XlaOp CombineShapePair(absl::Span<const XlaOp> pair,
const SplitShapePair& shape_pair,
const Shape& original_shape) {
if (original_shape.rank() == 0) {
return Reshape(pair[0], {});
}
XlaBuilder* builder = pair[0].builder();
XlaOp result = ConcatInDim(builder, pair, shape_pair.new_concat_dim);
const int64_t pre_split_size =
original_shape.dimensions(shape_pair.split_dim);
std::vector<int64_t> reshape_dims(original_shape.dimensions().begin(),
original_shape.dimensions().end());
reshape_dims[shape_pair.split_dim] = RoundUpTo<int64_t>(pre_split_size, 2);
result = Reshape(result, reshape_dims);
if (reshape_dims[shape_pair.split_dim] != pre_split_size) {
result = Slice(result, std::vector<int64_t>(original_shape.rank(), 0),
original_shape.dimensions(),
std::vector<int64_t>(original_shape.rank(), 1));
}
return result;
}
RngOutput ThreeFryRngBit32(XlaOp key, XlaOp initial_state, const Shape& shape) {
auto shape_pair = SplitShapeIntoHalves(shape);
std::pair<ThreeFry2x32State, XlaOp> inputs_state =
GetThreeFryInputsAndUpdatedState(initial_state, shape_pair.half_shape);
ThreeFry2x32State inputs = inputs_state.first;
ThreeFry2x32State outputs = ThreeFry2x32(inputs, Uint64ToUint32s(key));
XlaOp result = CombineShapePair(outputs, shape_pair, shape);
return {result, inputs_state.second};
}
RngOutput ThreeFryRngBitNarrow(XlaOp op_key, XlaOp initial_state,
const Shape& shape) {
auto new_shape = shape;
new_shape.set_element_type(U32);
auto output = ThreeFryRngBit32(op_key, initial_state, new_shape);
output.value = ConvertElementType(
output.value, primitive_util::UnsignedIntegralTypeForBitWidth(
primitive_util::BitWidth(shape.element_type())));
return output;
}
RngOutput ThreeFryRngBit64(XlaOp key, XlaOp initial_state, const Shape& shape) {
std::pair<ThreeFry2x32State, XlaOp> inputs_state =
GetThreeFryInputsAndUpdatedState(initial_state, shape);
ThreeFry2x32State inputs = inputs_state.first;
ThreeFry2x32State outputs = ThreeFry2x32(inputs, Uint64ToUint32s(key));
XlaOp result = Uint32sToUint64(outputs);
return {result, inputs_state.second};
}
using Philox4x32Key = std::array<XlaOp, 2>;
using Philox4x32State = std::array<XlaOp, 4>;
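// Philox4x32-10: ten rounds of the Philox counter-based generator. Each round
// multiplies two counter words by fixed 32-bit constants and xors the high
// halves of the products into the other words and the key; the key is bumped
// by fixed constants between rounds (a Weyl-style key schedule).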
Philox4x32State Philox4x32(Philox4x32State state, Philox4x32Key key) {
static const uint32_t kPhiloxW32A = 0x9E3779B9;
static const uint32_t kPhiloxW32B = 0xBB67AE85;
static const uint32_t kPhiloxM4x32A = 0xD2511F53;
static const uint32_t kPhiloxM4x32B = 0xCD9E8D57;
struct HighLowPair {
XlaOp high;
XlaOp low;
};
auto mul_hi_low = [](XlaOp x, uint32_t k) {
auto product =
ConvertElementType(x, U64) * ConstantR0<uint64_t>(x.builder(), k);
auto low = ConvertElementType(product, U32);
auto high = ConvertElementType(
product >> ConstantR0<uint64_t>(x.builder(), 32), U32);
return HighLowPair{high, low};
};
auto philox_round = [&](Philox4x32State x, Philox4x32Key key) {
auto product0 = mul_hi_low(x[0], kPhiloxM4x32A);
auto product1 = mul_hi_low(x[2], kPhiloxM4x32B);
return Philox4x32State{product1.high ^ x[1] ^ key[0], product1.low,
product0.high ^ x[3] ^ key[1], product0.low};
};
auto raise_key = [](Philox4x32Key key) {
XlaBuilder* builder = key[0].builder();
return Philox4x32Key{key[0] + ConstantR0<uint32_t>(builder, kPhiloxW32A),
key[1] + ConstantR0<uint32_t>(builder, kPhiloxW32B)};
};
static const int kNumRounds = 10;
for (int round = 0; round < kNumRounds; ++round, key = raise_key(key)) {
state = philox_round(state, key);
}
return state;
}
std::pair<Philox4x32State, Philox4x32Key> ScramblePhiloxKey(Philox4x32Key key) {
XlaBuilder* builder = key[0].builder();
XlaOp key0 = ConvertElementType(key[0], U64);
XlaOp key1 = ConvertElementType(key[1], U64);
Philox4x32State state = {
ConvertElementType(key0, U32),
ConvertElementType(key0 >> ScalarLike(key0, 32), U32),
ConvertElementType(key1, U32),
ConvertElementType(key1 >> ScalarLike(key1, 32), U32),
};
key = {ConstantR0<uint32_t>(builder, 0x3ec8f720),
ConstantR0<uint32_t>(builder, 0x02461e29)};
state = Philox4x32(state, key);
XlaOp zero = ConstantR0<uint32_t>(builder, 0);
return {Philox4x32State{zero, zero, state[2], state[3]},
Philox4x32Key{state[0], state[1]}};
}
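// Adds a uint64 to a 128-bit value represented as {low, high} 64-bit words,
// detecting carry-out of the low word via an unsigned wraparound check
// (new_low < low) and optionally broadcasting the result.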
std::array<XlaOp, 2> Uint128AddUint64(
const std::array<XlaOp, 2>& u128, XlaOp u64,
absl::Span<const int64_t> broadcast_sizes = {}) {
auto u128_low = u128[0];
auto u128_high = u128[1];
XlaOp new_u128_low = u128_low + u64;
XlaOp one = ConstantR0<uint64_t>(u128[0].builder(), 1);
XlaOp new_u128_high = Select(Lt(new_u128_low, u128_low),
Broadcast(u128_high + one, broadcast_sizes),
Broadcast(u128_high, broadcast_sizes));
return {new_u128_low, new_u128_high};
}
std::array<XlaOp, 2> Uint32sToUint128(const std::array<XlaOp, 4>& u32s) {
return {Uint32sToUint64({u32s[0], u32s[1]}),
Uint32sToUint64({u32s[2], u32s[3]})};
}
std::array<XlaOp, 4> Uint128ToUint32s(const std::array<XlaOp, 2>& u128) {
std::array<XlaOp, 2> u128_low_32s = Uint64ToUint32s(u128[0]);
std::array<XlaOp, 2> u128_high_32s = Uint64ToUint32s(u128[1]);
return {u128_low_32s[0], u128_low_32s[1], u128_high_32s[0], u128_high_32s[1]};
}
std::array<XlaOp, 2> Uint128FromOp(XlaOp op) {
auto u128_low = xla::Reshape(xla::Slice(op, {0}, {1}, {1}), {});
auto u128_high = xla::Reshape(xla::Slice(op, {1}, {2}, {1}), {});
return {u128_low, u128_high};
}
XlaOp Uint128ToOp(std::array<XlaOp, 2> u128) {
return ConcatScalars(u128[0].builder(), {u128[0], u128[1]});
}
std::pair<Philox4x32State, XlaOp> GetPhiloxInputsAndUpdatedState(
const Philox4x32State& state, int64_t n) {
XlaBuilder* builder = state[0].builder();
XlaOp iota = Iota(builder, U64, n);
auto state_u128 = Uint32sToUint128(state);
auto inputs = Uint128ToUint32s(Uint128AddUint64(state_u128, iota, {n}));
XlaOp new_state = Uint128ToOp(
Uint128AddUint64(state_u128, ConstantR0<uint64_t>(builder, n)));
return std::make_pair(inputs, new_state);
}
std::pair<Philox4x32State, XlaOp> GeneratePhiloxBits(int64_t num_elems,
XlaOp initial_state,
Philox4x32Key key) {
Philox4x32State state;
state = Uint128ToUint32s(Uint128FromOp(initial_state));
const int64_t num_vector4 = CeilOfRatio<int64_t>(num_elems, 4);
Philox4x32State inputs;
XlaOp new_state;
std::tie(inputs, new_state) =
GetPhiloxInputsAndUpdatedState(state, num_vector4);
auto outputs = Philox4x32(inputs, key);
return std::make_pair(outputs, new_state);
}
RngOutput PhiloxRngBit32(XlaOp op_key, XlaOp initial_state,
const Shape& shape) {
XlaBuilder* builder = op_key.builder();
const int64_t num_elems = ShapeUtil::ElementsIn(shape);
Philox4x32Key key = Uint64ToUint32s(op_key);
Philox4x32State bits;
XlaOp new_state;
std::tie(bits, new_state) = GeneratePhiloxBits(num_elems, initial_state, key);
int64_t bits_len = (num_elems + 3) / 4;
for (auto i = 0; i < 4; ++i) {
bits[i] = Reshape(bits[i], {bits_len, 1});
}
XlaOp numbers = ConcatInDim(builder, {bits[0], bits[1], bits[2], bits[3]},
1);
numbers = Reshape(numbers, {bits_len * 4});
numbers = Slice(numbers, {0},
{num_elems},
{1});
return {Reshape(numbers, shape.dimensions()), new_state};
}
RngOutput PhiloxRngBitNarrow(XlaOp op_key, XlaOp initial_state,
const Shape& shape) {
auto new_shape = shape;
new_shape.set_element_type(U32);
auto output = PhiloxRngBit32(op_key, initial_state, new_shape);
output.value = ConvertElementType(
output.value, primitive_util::UnsignedIntegralTypeForBitWidth(
primitive_util::BitWidth(shape.element_type())));
return output;
}
RngOutput PhiloxRngBit64(XlaOp op_key, XlaOp initial_state,
const Shape& shape) {
XlaBuilder* builder = op_key.builder();
const int64_t num_elems = ShapeUtil::ElementsIn(shape);
Philox4x32Key key = Uint64ToUint32s(op_key);
Philox4x32State bits32;
XlaOp new_state;
std::tie(bits32, new_state) =
GeneratePhiloxBits(num_elems * 2, initial_state, key);
std::array<XlaOp, 2> bits64;
bits64[0] = Uint32sToUint64({bits32[0], bits32[1]});
bits64[1] = Uint32sToUint64({bits32[2], bits32[3]});
int64_t bits64_len = (num_elems + 1) / 2;
for (auto i = 0; i < 2; ++i) {
bits64[i] = Reshape(bits64[i], {bits64_len, 1});
}
XlaOp numbers = ConcatInDim(builder, {bits64[0], bits64[1]},
1);
numbers = Reshape(numbers, {bits64_len * 2});
numbers = Slice(numbers, {0},
{num_elems},
{1});
return {Reshape(numbers, shape.dimensions()), new_state};
}
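// Maps raw random bits to uniform floats: the top mantissa-width bits are
// scaled by 2^-mantissa_bits into [0, 1) and then stretched to
// [minval, maxval); the F16/U16 pair instead assembles a value in [1, 2)
// directly from exponent and mantissa bits and subtracts 1.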
XlaOp ConvertRandomBitsToUniformFloatingPoint(XlaOp bits, XlaOp minval,
XlaOp maxval) {
XlaBuilder* builder = bits.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape* minval_shape,
builder->GetShapePtr(minval));
TF_ASSIGN_OR_RETURN(const Shape* bits_shape, builder->GetShapePtr(bits));
PrimitiveType value_type = minval_shape->element_type();
PrimitiveType bit_type = bits_shape->element_type();
if (!primitive_util::IsFloatingPointType(value_type) ||
!primitive_util::IsIntegralType(bit_type)) {
return InvalidArgument(
"In ConvertRandomBitsToUniformFloatingPoint, value_type and bit_type "
"can only be (floating_type, integer_type). Got combination: (%s, "
"%s).",
primitive_util::LowercasePrimitiveTypeName(value_type),
primitive_util::LowercasePrimitiveTypeName(bit_type));
}
if (value_type == F16 && bit_type == U16) {
auto mantissa = bits & ScalarLike(bits, 0x3ffu);
auto exponent = ScalarLike(bits, static_cast<uint16_t>(15) << 10);
auto u16_result = exponent | mantissa;
auto result = BitcastConvertType(u16_result, F16);
return result - ScalarLike(result, 1.0);
} else {
int num_bits = primitive_util::BitWidth(bit_type);
int num_mantissa_bits = primitive_util::SignificandWidth(value_type) - 1;
if (num_mantissa_bits > num_bits) {
return InvalidArgument(
"%s bit type argument must have enough bits to cover the number of "
"mantissa bits of the result type %s",
primitive_util::LowercasePrimitiveTypeName(bit_type),
primitive_util::LowercasePrimitiveTypeName(value_type));
}
bits = ShiftRightLogical(bits,
ScalarLike(bits, num_bits - num_mantissa_bits));
XlaOp values = ConvertElementType(bits, value_type);
values = values * ScalarLike(values, std::ldexp(1., -num_mantissa_bits));
return values * (maxval - minval) + minval;
}
});
}
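// Maps raw bits to integers in [minval, maxval) by reducing modulo the
// unsigned range, then adding the remainder back to minval in two halves
// (dist/2 plus the rest), presumably to sidestep overflow when converting
// back to the signed type.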
XlaOp ConvertRandomBitsToUniformInt(XlaOp bits, XlaOp minval, XlaOp maxval,
PrimitiveType type,
PrimitiveType unsigned_type) {
XlaBuilder* builder = bits.builder();
XlaOp range = BitcastConvertType(maxval, unsigned_type) -
BitcastConvertType(minval, unsigned_type);
XlaOp dist = Rem(bits, range);
XlaOp dist_div_2 =
ShiftRightLogical(dist, ConstantR0WithType(builder, unsigned_type, 1));
return minval + BitcastConvertType(dist_div_2, type) +
BitcastConvertType(dist - dist_div_2, type);
}
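// Box-Muller transform: turns two independent uniforms in [0, 1) into two
// independent standard normal samples, clamping the first uniform away from
// zero so the Log term stays finite.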
std::pair<XlaOp, XlaOp> BoxMullerTransform(XlaOp x0, XlaOp x1) {
XlaOp u1 = Max(x0, ScalarLike(x0, 1.0e-7f));
XlaOp v1 = ScalarLike(x1, 2.0f * M_PI) * x1;
XlaOp u2 = Sqrt(ScalarLike(u1, -2.0f) * Log(u1));
return {Sin(v1) * u2, Cos(v1) * u2};
}
}
XlaOp PhiloxIncreaseCounter(XlaOp counter, XlaOp delta) {
return Uint128ToOp(Uint128AddUint64(Uint128FromOp(counter), delta));
}
RngOutput ThreeFryBitGenerator(XlaOp key, XlaOp initial_state,
const Shape& shape) {
PrimitiveType type = shape.element_type();
return primitive_util::PrimitiveTypeSwitch<RngOutput>(
[&](auto primitive_type_constant) -> RngOutput {
if constexpr (primitive_util::IsArrayType(primitive_type_constant) &&
!primitive_util::IsComplexType(primitive_type_constant) &&
primitive_type_constant != PRED) {
const int kBits = primitive_util::BitWidth(primitive_type_constant);
if (kBits < 32) {
return ThreeFryRngBitNarrow(key, initial_state, shape);
}
if (kBits == 32) {
return ThreeFryRngBit32(key, initial_state, shape);
}
if (kBits == 64) {
return ThreeFryRngBit64(key, initial_state, shape);
}
}
return {
key.builder()->ReportError(Unimplemented(
"Types other than F16, F32, F64, U16, S16, U32, S32, U64 and "
"S64 are not implemented by ThreeFryBitGenerator; got %s",
primitive_util::LowercasePrimitiveTypeName(type))),
initial_state};
},
type);
}
RngOutput PhiloxBitGenerator(XlaOp key, XlaOp initial_state,
const Shape& shape) {
PrimitiveType type = shape.element_type();
return primitive_util::PrimitiveTypeSwitch<RngOutput>(
[&](auto primitive_type_constant) -> RngOutput {
if constexpr (primitive_util::IsArrayType(primitive_type_constant) &&
!primitive_util::IsComplexType(primitive_type_constant) &&
primitive_type_constant != PRED) {
const int kBits = primitive_util::BitWidth(primitive_type_constant);
if (kBits < 32) {
return PhiloxRngBitNarrow(key, initial_state, shape);
}
if (kBits == 32) {
return PhiloxRngBit32(key, initial_state, shape);
}
if (kBits == 64) {
return PhiloxRngBit64(key, initial_state, shape);
}
}
return {
key.builder()->ReportError(Unimplemented(
"Types other than F16, F32, F64, U16, S16, U32, S32, U64 and "
"S64 are not implemented by PhiloxBitGenerator; got %s",
primitive_util::LowercasePrimitiveTypeName(type))),
initial_state};
},
type);
}
std::pair<XlaOp, XlaOp> ScramblePhiloxKey(XlaOp key) {
Philox4x32Key pkey = Uint64ToUint32s(key);
auto state_key = ScramblePhiloxKey(pkey);
return std::make_pair(Uint128ToOp(Uint32sToUint128(state_key.first)),
Uint32sToUint64(state_key.second));
}
RngOutput UniformFloatingPointDistribution(XlaOp key, XlaOp initial_state,
BitGeneratorTy bit_generator,
XlaOp minval, XlaOp maxval,
const Shape& shape) {
RngOutput bits_state = bit_generator(key, initial_state, shape);
XlaOp bits = bits_state.value;
XlaOp new_state = bits_state.state;
return {ConvertRandomBitsToUniformFloatingPoint(bits, minval, maxval),
new_state};
}
RngOutput UniformIntDistribution(XlaOp key, XlaOp initial_state,
BitGeneratorTy bit_generator, XlaOp minval,
XlaOp maxval, const Shape& shape) {
RngOutput bits_state = bit_generator(key, initial_state, shape);
XlaOp bits = bits_state.value;
XlaOp new_state = bits_state.state;
PrimitiveType type = shape.element_type();
PrimitiveType unsigned_type;
if (type == U32 || type == S32) {
unsigned_type = U32;
} else if (type == U64 || type == S64) {
unsigned_type = U64;
} else {
return {key.builder()->ReportError(Unimplemented(
"Types other than U32, S32, U64 and S64 "
"are not implemented by UniformIntDistribution; got %s",
primitive_util::LowercasePrimitiveTypeName(type))),
initial_state};
}
return {
ConvertRandomBitsToUniformInt(bits, minval, maxval, type, unsigned_type),
new_state};
}
RngOutput NormalFloatingPointDistribution(XlaOp key, XlaOp initial_state,
BitGeneratorTy bit_generator,
const Shape& shape) {
XlaBuilder* builder = key.builder();
PrimitiveType primitive_type = shape.element_type();
if (!(primitive_type == F16 || primitive_type == F32 ||
primitive_type == F64)) {
return {
builder->ReportError(Unimplemented(
"Types other than F16, F32 and F64 "
"are not implemented by NormalFloatingPointDistribution; got %s",
primitive_util::LowercasePrimitiveTypeName(primitive_type))),
initial_state};
}
auto shape_pair = SplitShapeIntoHalves(shape);
RngOutput bits_state = UniformFloatingPointDistribution(
key, initial_state, bit_generator,
xla::ConstantR0WithType(builder, primitive_type, 0.0),
xla::ConstantR0WithType(builder, primitive_type, 1.0),
shape_pair.concat_shape);
XlaOp bits_0 = Slice(bits_state.value,
std::vector<int64_t>(shape_pair.half_shape.rank(), 0),
shape_pair.half_shape.dimensions(),
std::vector<int64_t>(shape_pair.half_shape.rank(), 1));
std::vector<int64_t> bits_1_starts(shape_pair.half_shape.rank(), 0);
bits_1_starts[shape_pair.new_concat_dim] = 1;
XlaOp bits_1 = Slice(bits_state.value, bits_1_starts,
shape_pair.concat_shape.dimensions(),
std::vector<int64_t>(shape_pair.half_shape.rank(), 1));
std::tie(bits_0, bits_1) = BoxMullerTransform(bits_0, bits_1);
XlaOp normal = CombineShapePair({bits_0, bits_1}, shape_pair, shape);
return {normal, bits_state.state};
}
} | #include "xla/hlo/builder/lib/prng.h"
#include <cstdint>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
class PrngTest : public ClientLibraryTestBase {
public:
template <PrimitiveType value_type, PrimitiveType bit_type,
typename ValueT = typename primitive_util::PrimitiveTypeToNative<
value_type>::type,
typename BitT =
typename primitive_util::PrimitiveTypeToNative<bit_type>::type>
void TestConvertRandomBitsToUniformFloatingPoint(uint32_t bits, float minval,
float maxval) {
XlaBuilder builder("convert_random_bits_to_uniform_floating_point");
XlaOp bits_op = ConstantR0<BitT>(&builder, static_cast<BitT>(bits));
XlaOp minval_op = ConstantR0<ValueT>(&builder, static_cast<ValueT>(minval));
XlaOp maxval_op = ConstantR0<ValueT>(&builder, static_cast<ValueT>(maxval));
XlaOp seed = ConstantR0<uint64_t>(&builder, 42);
XlaOp initial_state = Zero(&builder, PrimitiveType::U64);
BitGeneratorTy bit_generator = [](XlaOp key, XlaOp state,
const Shape& shape) {
state = ConcatScalars(key.builder(), {key, state});
XlaOp result =
RngBitGenerator(RandomAlgorithm::RNG_DEFAULT, state, shape);
return RngOutput{GetTupleElement(result, 1),
GetTupleElement(result, 0)};
};
const Shape rng_shape = builder.GetShape(bits_op).value();
EXPECT_EQ(rng_shape.element_type(), bit_type);
RngOutput rng_output = UniformFloatingPointDistribution(
seed, initial_state, bit_generator, minval_op, maxval_op, rng_shape);
if (rng_output.value.valid()) {
XlaOp result = rng_output.value;
EXPECT_EQ(builder.GetShape(result).value().element_type(), value_type);
XlaOp result_ge_min = Ge(result, minval_op);
XlaOp result_lt_max = Lt(result, maxval_op);
And(result_ge_min, result_lt_max);
ComputeAndCompareR0<bool>(&builder, true, {});
} else {
EXPECT_EQ(builder.first_error().code(),
absl::StatusCode::kInvalidArgument);
}
}
};
XLA_TEST_F(PrngTest, RandomBitsToUniformFloatingPointInvalidArguments) {
TestConvertRandomBitsToUniformFloatingPoint<PrimitiveType::F32,
PrimitiveType::U16>(0x1234, 0.0f,
1.0f);
TestConvertRandomBitsToUniformFloatingPoint<PrimitiveType::F16,
PrimitiveType::U8>(0x12, 0.0f,
1.0f);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/prng.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/prng_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
23a4b866-72e7-4211-9fcd-13bf74a06165 | cpp | tensorflow/tensorflow | optimize_cross_host_control_deps | tensorflow/core/common_runtime/optimize_cross_host_control_deps.cc | tensorflow/core/common_runtime/optimize_cross_host_control_deps_test.cc | #include "tensorflow/core/common_runtime/optimize_cross_host_control_deps.h"
#include <algorithm>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/strcat.h"
namespace tensorflow {
namespace {
Status BuildNoopNode(const Node& source, StringPiece name, const string& device,
Graph* graph, Node** node) {
NodeDefBuilder builder(name, "NoOp", NodeDebugInfo(source));
if (!device.empty()) {
builder.Device(device);
}
NodeDef def;
TF_RETURN_IF_ERROR(builder.Finalize(&def));
TF_ASSIGN_OR_RETURN(*node, graph->AddNode(def));
if (!device.empty()) {
(*node)->set_assigned_device_name(device);
}
return absl::OkStatus();
}
Status BuildIdentityNNode(const Node& source, StringPiece name,
const string& device, Graph* graph,
std::vector<NodeDefBuilder::NodeOut>& inputs,
Node** node) {
NodeDefBuilder builder(name, "IdentityN", NodeDebugInfo(source));
if (!device.empty()) {
builder.Device(device);
}
builder.Input(inputs);
NodeDef def;
TF_RETURN_IF_ERROR(builder.Finalize(&def));
TF_ASSIGN_OR_RETURN(*node, graph->AddNode(def));
if (!device.empty()) {
(*node)->set_assigned_device_name(device);
}
return absl::OkStatus();
}
Status BuildIdentityNode(const Node& source, StringPiece name,
const string& device, Graph* graph,
std::vector<NodeDefBuilder::NodeOut>& inputs,
Node** node) {
NodeDefBuilder builder(name, "Identity", NodeDebugInfo(source));
if (!device.empty()) {
builder.Device(device);
}
builder.Input(inputs[0]);
NodeDef def;
TF_RETURN_IF_ERROR(builder.Finalize(&def));
TF_ASSIGN_OR_RETURN(*node, graph->AddNode(def));
if (!device.empty()) {
(*node)->set_assigned_device_name(device);
}
return absl::OkStatus();
}
const string& RequestedOrAssignedDevice(const Node* n) {
if (!n->assigned_device_name().empty()) {
return n->assigned_device_name();
}
return n->requested_device();
}
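// Caches a graph-wide view of devices: each node is mapped to an integer id
// for its (CPU-normalized) requested or assigned device, and whether two
// device ids share an address space is precomputed for every pair.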
class DeviceLookup {
public:
DeviceLookup() = default;
static absl::StatusOr<DeviceLookup> FromGraph(Graph* graph) {
DeviceLookup lookup;
for (Node* n : graph->op_nodes()) {
string device;
TF_RETURN_IF_ERROR(DeviceNameUtils::DeviceNameToCpuDeviceName(
RequestedOrAssignedDevice(n), &device));
auto iter = lookup.device_name_to_id_.find(device);
int id;
if (iter == lookup.device_name_to_id_.end()) {
id = lookup.device_name_to_id_.size();
lookup.device_name_to_id_[device] = id;
lookup.device_id_to_name_[id] = device;
} else {
id = iter->second;
}
lookup.node_to_device_id_[n] = id;
}
for (auto& [device1, id1] : lookup.device_name_to_id_) {
for (auto& [device2, id2] : lookup.device_name_to_id_) {
bool b = DeviceNameUtils::IsSameAddressSpace(device1, device2);
lookup.is_same_address_space_[std::make_pair(id1, id2)] = b;
}
}
return lookup;
}
inline int NodeToDeviceId(const Node* node) {
return node_to_device_id_[node];
}
inline string DeviceIdToName(int id) { return device_id_to_name_[id]; }
inline bool IsSameAddressSpace(int id1, int id2) {
return is_same_address_space_[std::make_pair(id1, id2)];
}
private:
absl::flat_hash_map<int, string> device_id_to_name_;
absl::flat_hash_map<string, int> device_name_to_id_;
absl::flat_hash_map<const Node*, int> node_to_device_id_;
absl::flat_hash_map<std::pair<int, int>, bool> is_same_address_space_;
};
}
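// When a node has at least `cross_host_edges_threshold` control edges into a
// single remote host, those edges are rerouted through one NoOp placed on
// that host, leaving a single cross-host control edge per
// (source node, remote host) pair.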
Status OptimizeCrossHostControlOutputEdges(Graph* graph,
int cross_host_edges_threshold) {
TF_ASSIGN_OR_RETURN(DeviceLookup lookup, DeviceLookup::FromGraph(graph));
for (Node* n : graph->op_nodes()) {
if (n->out_edges().size() < cross_host_edges_threshold) {
continue;
}
absl::flat_hash_map<int, std::vector<const Edge*>> cross_host_control_edges;
int src_device_id = lookup.NodeToDeviceId(n);
for (const Edge* edge : n->out_edges()) {
if (!edge->IsControlEdge() || edge->dst()->IsSink()) {
continue;
}
int dst_device_id = lookup.NodeToDeviceId(edge->dst());
if (lookup.IsSameAddressSpace(src_device_id, dst_device_id)) {
continue;
}
auto iter = cross_host_control_edges.find(dst_device_id);
if (iter == cross_host_control_edges.end()) {
cross_host_control_edges[dst_device_id] = {edge};
} else {
iter->second.push_back(edge);
}
}
for (const auto& pair : cross_host_control_edges) {
if (pair.second.size() < cross_host_edges_threshold) {
continue;
}
string device = lookup.DeviceIdToName(pair.first);
VLOG(1) << "Optmize cross host output control edge, src node: "
<< n->name()
<< " src device: " << lookup.DeviceIdToName(src_device_id)
<< " dst host device: " << device
<< " edges size: " << pair.second.size();
Node* control_after;
TF_RETURN_IF_ERROR(BuildNoopNode(
*n, graph->NewName(strings::StrCat(n->name(), "/", "control_after")),
device, graph, &control_after));
graph->AddControlEdge(n, control_after, true);
for (const Edge* edge : pair.second) {
graph->AddControlEdge(control_after, edge->dst(),
true);
graph->RemoveEdge(edge);
}
}
}
return absl::OkStatus();
}
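// Same idea for data edges: fan-out to another host is funneled through an
// Identity node on that host (when every edge carries the same source output)
// or an IdentityN with one input per edge; groups whose edges all target the
// same destination node are left alone.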
Status OptimizeCrossHostDataOutputEdges(Graph* graph,
int cross_host_edges_threshold) {
TF_ASSIGN_OR_RETURN(DeviceLookup lookup, DeviceLookup::FromGraph(graph));
for (Node* n : graph->op_nodes()) {
if (n->out_edges().size() < cross_host_edges_threshold) {
continue;
}
absl::flat_hash_map<int, std::vector<const Edge*>> cross_host_edges;
int src_id = lookup.NodeToDeviceId(n);
for (const Edge* edge : n->out_edges()) {
Node* dst = edge->dst();
if (edge->IsControlEdge() || dst->IsSink()) {
continue;
}
int dst_id = lookup.NodeToDeviceId(dst);
if (lookup.IsSameAddressSpace(src_id, dst_id)) {
continue;
}
auto iter = cross_host_edges.find(dst_id);
if (iter == cross_host_edges.end()) {
cross_host_edges[dst_id] = {edge};
} else {
iter->second.push_back(edge);
}
}
for (const auto& pair : cross_host_edges) {
if (pair.second.size() < cross_host_edges_threshold) {
continue;
}
if (pair.second.empty()) {
continue;
}
int device_id = pair.first;
Node* node0 = pair.second[0]->dst();
if (std::all_of(pair.second.begin(), pair.second.end(),
[node0](const Edge* e) { return e->dst() == node0; })) {
continue;
}
string device = lookup.DeviceIdToName(device_id);
VLOG(1) << "Optimize cross host output edge, src node: " << n->name()
<< " src device: " << lookup.DeviceIdToName(src_id)
<< " dst host device: " << device
<< " edges size: " << pair.second.size();
Node* data_after;
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.reserve(pair.second.size());
const Edge* edge0 = pair.second[0];
if (std::all_of(pair.second.begin(), pair.second.end(),
[edge0](const Edge* e) {
return e->src() == edge0->src() &&
e->src_output() == edge0->src_output();
})) {
inputs.emplace_back(edge0->src()->name(), edge0->src_output(),
edge0->src()->output_type(edge0->src_output()));
TF_RETURN_IF_ERROR(BuildIdentityNode(
*n, graph->NewName(strings::StrCat(n->name(), "/", "data_after")),
device, graph, inputs, &data_after));
graph->AddEdge(edge0->src(), edge0->src_output(), data_after, 0);
int i = 0;
for (const Edge* edge : pair.second) {
graph->AddEdge(data_after, 0, edge->dst(), edge->dst_input());
graph->RemoveEdge(edge);
i++;
}
} else {
for (const Edge* edge : pair.second) {
inputs.emplace_back(edge->src()->name(), edge->src_output(),
edge->src()->output_type(edge->src_output()));
}
TF_RETURN_IF_ERROR(BuildIdentityNNode(
*n, graph->NewName(strings::StrCat(n->name(), "/", "data_after")),
device, graph, inputs, &data_after));
int i = 0;
for (const Edge* edge : pair.second) {
graph->AddEdge(data_after, i, edge->dst(), edge->dst_input());
graph->AddEdge(edge->src(), edge->src_output(), data_after, i);
graph->RemoveEdge(edge);
i++;
}
}
}
}
return absl::OkStatus();
}
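// Mirror of the output-edge pass: a node with many incoming cross-host
// control edges gets one NoOp per source host that collects those edges and
// forwards a single control edge to the destination node.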
Status OptimizeCrossHostControlInputEdges(Graph* graph,
int cross_host_edges_threshold) {
TF_ASSIGN_OR_RETURN(DeviceLookup lookup, DeviceLookup::FromGraph(graph));
absl::flat_hash_map<Node*, std::vector<const Edge*>> node_control_input_edges;
for (Node* n : graph->op_nodes()) {
for (const Edge* edge : n->out_edges()) {
if (!edge->IsControlEdge() || edge->dst()->IsSink()) {
continue;
}
Node* dst = edge->dst();
auto iter = node_control_input_edges.find(dst);
if (iter == node_control_input_edges.end()) {
node_control_input_edges[dst] = {edge};
} else {
node_control_input_edges[dst].push_back(edge);
}
}
}
for (auto& pair : node_control_input_edges) {
Node* dst = pair.first;
const std::vector<const Edge*>& input_edges = pair.second;
if (input_edges.size() < cross_host_edges_threshold) {
continue;
}
absl::flat_hash_map<int, std::vector<const Edge*>> cross_host_control_edges;
int dst_device_id = lookup.NodeToDeviceId(dst);
for (const Edge* edge : input_edges) {
int src_device_id = lookup.NodeToDeviceId(edge->src());
if (lookup.IsSameAddressSpace(src_device_id, dst_device_id)) {
continue;
}
auto iter = cross_host_control_edges.find(src_device_id);
if (iter == cross_host_control_edges.end()) {
cross_host_control_edges[src_device_id] = {edge};
} else {
iter->second.push_back(edge);
}
}
for (const auto& pair : cross_host_control_edges) {
if (pair.second.size() < cross_host_edges_threshold) {
continue;
}
string src_device = lookup.DeviceIdToName(pair.first);
VLOG(1) << "Optmize cross host input control edge, dst node: "
<< dst->name()
<< " dst device: " << lookup.DeviceIdToName(dst_device_id)
<< " src host device: " << src_device
<< " edges size: " << pair.second.size();
Node* control_before;
TF_RETURN_IF_ERROR(BuildNoopNode(
*dst,
graph->NewName(strings::StrCat(dst->name(), "/", "control_before")),
src_device, graph, &control_before));
graph->AddControlEdge(control_before, dst, true);
for (const Edge* edge : pair.second) {
graph->AddControlEdge(edge->src(), control_before,
true);
graph->RemoveEdge(edge);
}
}
}
return absl::OkStatus();
}
} | #include "tensorflow/core/common_runtime/optimize_cross_host_control_deps.h"
#include <unordered_map>
#include <vector>
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
Node* GetNodeByName(const string& name, Graph* graph) {
for (Node* node : graph->op_nodes()) {
if (node->name() == name) return node;
}
return nullptr;
}
TEST(OptimizeCrossHostControlDepsTest, OptimizeCrossHostControlOutputEdges) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
auto a = ops::Const(scope.WithOpName("a"), 1.0f);
a.node()->set_assigned_device_name("/job:worker/task:0/CPU:0");
auto b = ops::Const(scope.WithOpName("b").WithControlDependencies(a), 2.0f);
b.node()->set_assigned_device_name("/job:worker/task:1/CPU:0");
auto c = ops::Const(scope.WithOpName("c").WithControlDependencies(a), 3.0f);
c.node()->set_assigned_device_name("/job:worker/task:1/CPU:1");
auto d = ops::Const(scope.WithOpName("d").WithControlDependencies(a), 4.0f);
d.node()->set_assigned_device_name("/job:worker/task:1/CPU:2");
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
ASSERT_EQ(graph.num_op_nodes(), 4);
TF_ASSERT_OK(OptimizeCrossHostControlOutputEdges(
&graph, 10));
ASSERT_EQ(graph.num_op_nodes(), 4);
TF_ASSERT_OK(OptimizeCrossHostControlOutputEdges(
&graph, 2));
ASSERT_EQ(graph.num_op_nodes(), 5);
Node* control_after = GetNodeByName("a/control_after/_0", &graph);
ASSERT_NE(control_after, nullptr);
EXPECT_EQ(control_after->op_def().name(), "NoOp");
EXPECT_EQ(control_after->assigned_device_name(),
"/job:worker/task:1/device:CPU:0");
}
TEST(OptimizeCrossHostControlDepsTest, OptimizeCrossHostDataOutputEdges) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
auto c1 = ops::Const(scope.WithOpName("c1"), 1.0f);
auto c2 = ops::Const(scope.WithOpName("c2"), 2.0f);
auto a = ops::IdentityN(scope.WithOpName("a"), {c1, c2});
a.operation.node()->set_assigned_device_name("/job:worker/task:0/CPU:0");
auto b = ops::Identity(scope.WithOpName("b"), a[0]);
b.node()->set_assigned_device_name("/job:worker/task:1/CPU:0");
auto c = ops::Identity(scope.WithOpName("c"), a[1]);
c.node()->set_assigned_device_name("/job:worker/task:1/CPU:1");
auto d = ops::Identity(scope.WithOpName("d"), a[0]);
d.node()->set_assigned_device_name("/job:worker/task:2/CPU:0");
auto e = ops::Identity(scope.WithOpName("e"), a[1]);
e.node()->set_assigned_device_name("/job:worker/task:2/CPU:1");
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
ASSERT_EQ(graph.num_op_nodes(), 7);
TF_ASSERT_OK(OptimizeCrossHostDataOutputEdges(
&graph, 10));
ASSERT_EQ(graph.num_op_nodes(), 7);
TF_ASSERT_OK(OptimizeCrossHostDataOutputEdges(
&graph, 2));
ASSERT_EQ(graph.num_op_nodes(), 9);
Node* data_after1 = GetNodeByName("a/data_after/_0", &graph);
Node* data_after2 = GetNodeByName("a/data_after/_1", &graph);
if (data_after1->assigned_device_name() ==
"/job:worker/task:2/device:CPU:0") {
std::swap(data_after1, data_after2);
}
ASSERT_NE(data_after1, nullptr);
EXPECT_EQ(data_after1->op_def().name(), "IdentityN");
EXPECT_EQ(data_after1->assigned_device_name(),
"/job:worker/task:1/device:CPU:0");
EXPECT_EQ(data_after1->def().input_size(), 2);
EXPECT_EQ(data_after1->def().input(0), "a");
EXPECT_EQ(data_after1->def().input(1), "a:1");
EXPECT_EQ(data_after1->op_def().name(), "IdentityN");
ASSERT_NE(data_after2, nullptr);
EXPECT_EQ(data_after2->op_def().name(), "IdentityN");
EXPECT_EQ(data_after2->assigned_device_name(),
"/job:worker/task:2/device:CPU:0");
EXPECT_EQ(data_after2->def().input_size(), 2);
EXPECT_EQ(data_after2->def().input(0), "a");
EXPECT_EQ(data_after2->def().input(1), "a:1");
EXPECT_EQ(data_after2->op_def().name(), "IdentityN");
GraphDef graph_def;
graph.ToGraphDef(&graph_def);
std::unordered_map<string, const NodeDef*> map;
for (auto& node : graph_def.node()) {
map[node.name()] = &node;
}
EXPECT_EQ(map["b"]->input(0), data_after1->name());
EXPECT_EQ(map["c"]->input(0), data_after1->name() + ":1");
EXPECT_EQ(map["d"]->input(0), data_after2->name());
EXPECT_EQ(map["e"]->input(0), data_after2->name() + ":1");
}
TEST(OptimizeCrossHostControlDepsTest,
CreatesIdentityNodesWhenInputsIdentical) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
auto c1 = ops::Const(scope.WithOpName("c1"), 1.0f);
auto c2 = ops::Const(scope.WithOpName("c2"), 2.0f);
auto a = ops::IdentityN(scope.WithOpName("a"), {c1, c2});
a.operation.node()->set_assigned_device_name("/job:worker/task:0/CPU:0");
auto b = ops::Identity(scope.WithOpName("b"), a[0]);
auto c = ops::Identity(scope.WithOpName("c"), a[0]);
auto d = ops::Identity(scope.WithOpName("d"), a[0]);
auto e = ops::Identity(scope.WithOpName("e"), a[0]);
b.node()->set_assigned_device_name("/job:worker/task:1/CPU:0");
c.node()->set_assigned_device_name("/job:worker/task:1/CPU:0");
d.node()->set_assigned_device_name("/job:worker/task:1/CPU:0");
e.node()->set_assigned_device_name("/job:worker/task:1/CPU:0");
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
ASSERT_EQ(graph.num_op_nodes(), 7);
TF_ASSERT_OK(OptimizeCrossHostDataOutputEdges(
&graph, 2));
ASSERT_EQ(graph.num_op_nodes(), 8);
Node* data_after = GetNodeByName("a/data_after/_0", &graph);
ASSERT_NE(data_after, nullptr);
EXPECT_EQ(data_after->op_def().name(), "Identity");
EXPECT_EQ(data_after->assigned_device_name(),
"/job:worker/task:1/device:CPU:0");
EXPECT_EQ(data_after->def().input_size(), 1);
EXPECT_EQ(data_after->def().input(0)[0], 'a');
EXPECT_EQ(data_after->op_def().name(), "Identity");
GraphDef graph_def;
graph.ToGraphDef(&graph_def);
std::unordered_map<string, const NodeDef*> map;
for (auto& node : graph_def.node()) {
map[node.name()] = &node;
}
EXPECT_EQ(map["b"]->input(0), data_after->name());
EXPECT_EQ(map["c"]->input(0), data_after->name());
EXPECT_EQ(map["d"]->input(0), data_after->name());
EXPECT_EQ(map["e"]->input(0), data_after->name());
}
TEST(OptimizeCrossHostControlDepsTest, OptimizeCrossHostControlInputEdges) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
auto a = ops::Const(scope.WithOpName("a"), 1.0f);
a.node()->set_assigned_device_name("/job:worker/task:0/CPU:0");
auto b = ops::Const(scope.WithOpName("b"), 2.0f);
b.node()->set_assigned_device_name("/job:worker/task:0/CPU:1");
auto c = ops::Const(scope.WithOpName("c"), 1.0f);
c.node()->set_assigned_device_name("/job:worker/task:0/CPU:2");
auto d = ops::Const(
scope.WithOpName("d").WithControlDependencies({a.op(), b.op(), c.op()}),
4.0f);
d.node()->set_assigned_device_name("/job:worker/task:1/CPU:0");
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
ASSERT_EQ(graph.num_op_nodes(), 4);
TF_ASSERT_OK(OptimizeCrossHostControlOutputEdges(
&graph, 10));
ASSERT_EQ(graph.num_op_nodes(), 4);
TF_ASSERT_OK(OptimizeCrossHostControlInputEdges(
&graph, 2));
ASSERT_EQ(graph.num_op_nodes(), 5);
Node* control_before = GetNodeByName("d/control_before/_0", &graph);
ASSERT_NE(control_before, nullptr);
EXPECT_EQ(control_before->op_def().name(), "NoOp");
EXPECT_EQ(control_before->assigned_device_name(),
"/job:worker/task:0/device:CPU:0");
}
TEST(OptimizeCrossHostControlDepsTest, LargeGraph) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
constexpr int size = 750;
std::vector<Operation> layer1;
for (int i = 0; i < size; ++i) {
auto n = ops::Const(scope, 1.0f);
n.node()->set_assigned_device_name("/job:worker/task:0/CPU:0");
layer1.push_back(n.op());
}
for (int j = 0; j < size; ++j) {
auto d = ops::Const(scope.WithControlDependencies(layer1), 1.0f);
d.node()->set_assigned_device_name("/job:worker/task:0/CPU:0");
}
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
ASSERT_EQ(graph.num_op_nodes(), size * 2);
TF_ASSERT_OK(OptimizeCrossHostControlInputEdges(
&graph, size));
TF_ASSERT_OK(OptimizeCrossHostControlOutputEdges(
&graph, size));
ASSERT_EQ(graph.num_op_nodes(), size * 4);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/optimize_cross_host_control_deps.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/optimize_cross_host_control_deps_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9a5566ba-75db-493f-aca1-ceb37b0190f6 | cpp | google/tensorstore | google_auth_provider | tensorstore/internal/oauth2/google_auth_provider.cc | tensorstore/internal/oauth2/google_auth_provider_test.cc | #include "tensorstore/internal/oauth2/google_auth_provider.h"
#include <algorithm>
#include <fstream>
#include <functional>
#include <memory>
#include <new>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/base/thread_annotations.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/oauth2/auth_provider.h"
#include "tensorstore/internal/oauth2/fixed_token_auth_provider.h"
#include "tensorstore/internal/oauth2/gce_auth_provider.h"
#include "tensorstore/internal/oauth2/google_service_account_auth_provider.h"
#include "tensorstore/internal/oauth2/oauth2_auth_provider.h"
#include "tensorstore/internal/oauth2/oauth_utils.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_oauth2 {
namespace {
using ::tensorstore::internal::GetEnv;
using ::tensorstore::internal::JoinPath;
constexpr char kGoogleAuthTokenForTesting[] = "GOOGLE_AUTH_TOKEN_FOR_TESTING";
constexpr char kGoogleApplicationCredentials[] =
"GOOGLE_APPLICATION_CREDENTIALS";
constexpr char kCloudSdkConfig[] = "CLOUDSDK_CONFIG";
constexpr char kGCloudConfigFolder[] = ".config/gcloud/";
constexpr char kWellKnownCredentialsFile[] =
"application_default_credentials.json";
constexpr char kOAuthV3Url[] = "https:
bool IsFile(const std::string& filename) {
std::ifstream fstream(filename.c_str());
return fstream.good();
}
Result<std::string> GetEnvironmentVariableFileName() {
auto env = GetEnv(kGoogleApplicationCredentials);
if (!env || !IsFile(*env)) {
return absl::NotFoundError(tensorstore::StrCat(
"$", kGoogleApplicationCredentials, " is not set or corrupt."));
}
return *env;
}
Result<std::string> GetWellKnownFileName() {
std::string result;
auto config_dir_override = GetEnv(kCloudSdkConfig);
if (config_dir_override) {
result = JoinPath(*config_dir_override, kWellKnownCredentialsFile);
} else {
auto home_dir = GetEnv("HOME");
if (!home_dir) {
return absl::NotFoundError("Could not read $HOME.");
}
result =
JoinPath(*home_dir, kGCloudConfigFolder, kWellKnownCredentialsFile);
}
if (!IsFile(result)) {
return absl::NotFoundError(
tensorstore::StrCat("Could not find the credentials file in the "
"standard gcloud location [",
result, "]"));
}
return result;
}
struct AuthProviderRegistry {
std::vector<std::pair<int, GoogleAuthProvider>> providers;
absl::Mutex mutex;
};
AuthProviderRegistry& GetGoogleAuthProviderRegistry() {
static absl::NoDestructor<AuthProviderRegistry> registry;
return *registry;
}
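// Default credential lookup order: an auth token injected for testing via
// $GOOGLE_AUTH_TOKEN_FOR_TESTING, then a credentials JSON file (either a user
// refresh token handled by OAuth2AuthProvider or a service-account key handled
// by GoogleServiceAccountAuthProvider), and finally the GCE metadata server
// when running on GCE.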
Result<std::unique_ptr<AuthProvider>> GetDefaultGoogleAuthProvider(
std::shared_ptr<internal_http::HttpTransport> transport) {
std::unique_ptr<AuthProvider> result;
auto var = GetEnv(kGoogleAuthTokenForTesting);
if (var) {
ABSL_LOG(INFO) << "Using GOOGLE_AUTH_TOKEN_FOR_TESTING";
result.reset(new FixedTokenAuthProvider(*std::move(var)));
return std::move(result);
}
absl::Status status;
auto credentials_filename = GetEnvironmentVariableFileName();
if (!credentials_filename) {
credentials_filename = GetWellKnownFileName();
}
if (credentials_filename.ok()) {
ABSL_LOG(INFO) << "Using credentials at " << *credentials_filename;
std::ifstream credentials_fstream(*credentials_filename);
auto json = ::nlohmann::json::parse(credentials_fstream, nullptr, false);
auto refresh_token = internal_oauth2::ParseRefreshToken(json);
if (refresh_token.ok()) {
ABSL_LOG(INFO) << "Using OAuth2 AuthProvider";
result.reset(new OAuth2AuthProvider(*refresh_token, kOAuthV3Url,
std::move(transport)));
return std::move(result);
}
auto service_account =
internal_oauth2::ParseGoogleServiceAccountCredentials(json);
if (service_account.ok()) {
ABSL_LOG(INFO) << "Using ServiceAccount AuthProvider";
result.reset(new GoogleServiceAccountAuthProvider(*service_account,
std::move(transport)));
return std::move(result);
}
status = absl::UnknownError(
tensorstore::StrCat("Unexpected content of the JSON credentials file: ",
*credentials_filename));
}
if (auto gce_service_account =
GceAuthProvider::GetDefaultServiceAccountInfoIfRunningOnGce(
transport.get());
gce_service_account.ok()) {
ABSL_LOG(INFO) << "Running on GCE, using service account "
<< gce_service_account->email;
result.reset(
new GceAuthProvider(std::move(transport), *gce_service_account));
return std::move(result);
}
if (!credentials_filename.ok()) {
ABSL_LOG(ERROR)
<< credentials_filename.status().message()
<< ". You may specify a credentials file using $"
<< kGoogleApplicationCredentials
<< ", or to use Google application default credentials, run: "
"gcloud auth application-default login";
}
TENSORSTORE_RETURN_IF_ERROR(status);
return absl::NotFoundError(
"Could not locate the credentials file and not running on GCE.");
}
struct SharedGoogleAuthProviderState {
absl::Mutex mutex;
std::optional<Result<std::shared_ptr<AuthProvider>>> auth_provider
ABSL_GUARDED_BY(mutex);
};
SharedGoogleAuthProviderState& GetSharedGoogleAuthProviderState() {
static absl::NoDestructor<SharedGoogleAuthProviderState> state;
return *state;
}
}
void RegisterGoogleAuthProvider(GoogleAuthProvider provider, int priority) {
auto& registry = GetGoogleAuthProviderRegistry();
absl::WriterMutexLock lock(®istry.mutex);
registry.providers.emplace_back(priority, std::move(provider));
std::sort(registry.providers.begin(), registry.providers.end(),
[](const auto& a, const auto& b) { return a.first < b.first; });
}
Result<std::unique_ptr<AuthProvider>> GetGoogleAuthProvider(
std::shared_ptr<internal_http::HttpTransport> transport) {
{
auto& registry = GetGoogleAuthProviderRegistry();
absl::ReaderMutexLock lock(®istry.mutex);
for (const auto& provider : registry.providers) {
auto auth_result = provider.second();
if (auth_result.ok()) return auth_result;
}
}
return internal_oauth2::GetDefaultGoogleAuthProvider(std::move(transport));
}
Result<std::shared_ptr<AuthProvider>> GetSharedGoogleAuthProvider() {
auto& state = GetSharedGoogleAuthProviderState();
absl::MutexLock lock(&state.mutex);
if (!state.auth_provider) {
state.auth_provider.emplace(GetGoogleAuthProvider());
}
return *state.auth_provider;
}
void ResetSharedGoogleAuthProvider() {
auto& state = GetSharedGoogleAuthProviderState();
absl::MutexLock lock(&state.mutex);
state.auth_provider = std::nullopt;
}
}
} | #include "tensorstore/internal/oauth2/google_auth_provider.h"
#include <fstream>
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/escaping.h"
#include "absl/strings/match.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/http/mock_http_transport.h"
#include "tensorstore/internal/oauth2/auth_provider.h"
#include "tensorstore/internal/oauth2/fake_private_key.h"
#include "tensorstore/internal/oauth2/fixed_token_auth_provider.h"
#include "tensorstore/internal/oauth2/gce_auth_provider.h"
#include "tensorstore/internal/oauth2/google_auth_test_utils.h"
#include "tensorstore/internal/oauth2/google_service_account_auth_provider.h"
#include "tensorstore/internal/oauth2/oauth2_auth_provider.h"
#include "tensorstore/internal/oauth2/oauth_utils.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::internal::JoinPath;
using ::tensorstore::internal::SetEnv;
using ::tensorstore::internal::UnsetEnv;
using ::tensorstore::internal_http::ApplyResponseToHandler;
using ::tensorstore::internal_http::HttpRequest;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_http::HttpResponseHandler;
using ::tensorstore::internal_http::HttpTransport;
using ::tensorstore::internal_http::IssueRequestOptions;
using ::tensorstore::internal_http::SetDefaultHttpTransport;
using ::tensorstore::internal_oauth2::AuthProvider;
using ::tensorstore::internal_oauth2::GetFakePrivateKey;
using ::tensorstore::internal_oauth2::GetGoogleAuthProvider;
using ::tensorstore::internal_oauth2::GoogleAuthTestScope;
class TestData
: public tensorstore::internal_testing::ScopedTemporaryDirectory {
public:
std::string WriteApplicationDefaultCredentials() {
auto p = JoinPath(path(), "application_default_credentials.json");
std::ofstream ofs(p);
ofs << R"({
"client_id": "fake-client-id.apps.googleusercontent.com",
"client_secret": "fake-client-secret",
"refresh_token": "fake-refresh-token",
"type": "authorized_user"
})";
return p;
}
std::string WriteServiceAccountCredentials() {
auto p = JoinPath(path(), "service_account_credentials.json");
std::ofstream ofs(p);
ofs << R"({
"type": "service_account",
"project_id": "fake_project_id",
"private_key_id": "fake_key_id",
"client_email": "fake-test-project.iam.gserviceaccount.com",
"client_id": "fake_client_id",
"auth_uri": "https:
"token_uri": "https:
"auth_provider_x509_cert_url": "https:
"client_x509_cert_url": "https:
)";
ofs << " \"private_key\": \"" << absl::CEscape(GetFakePrivateKey())
<< "\" }";
return p;
}
};
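// HttpTransport mock that emulates the GCE metadata server: requests to other
// hosts are rejected, and the token/service-account endpoints return 404
// unless has_service_account_ is set.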
class MetadataMockTransport : public HttpTransport {
public:
void IssueRequestWithHandler(const HttpRequest& request,
IssueRequestOptions options,
HttpResponseHandler* response_handler) override {
ApplyResponseToHandler(
[&]() -> tensorstore::Result<HttpResponse> {
auto parsed = tensorstore::internal::ParseGenericUri(request.url);
if (!absl::StartsWith(parsed.authority_and_path,
"metadata.google.internal")) {
return absl::UnimplementedError("Mock cannot satisfy the request.");
}
constexpr char kOAuthPath[] =
"metadata.google.internal/computeMetadata/v1/"
"instance/service-accounts/[email protected]/token";
if (absl::StartsWith(parsed.authority_and_path, kOAuthPath)) {
if (!has_service_account_) {
return HttpResponse{404, absl::Cord()};
}
return HttpResponse{
200,
absl::Cord(
R"({ "token_type" : "refresh", "access_token": "abc", "expires_in": 3600 })")};
}
constexpr char kServiceAccountPath[] =
"metadata.google.internal/computeMetadata/v1/"
"instance/service-accounts/default/";
if (absl::StartsWith(parsed.authority_and_path,
kServiceAccountPath)) {
if (!has_service_account_) {
return HttpResponse{404, absl::Cord()};
}
return HttpResponse{
200,
absl::Cord(
R"({ "email": "[email protected]", "scopes": [ "test" ] })")};
}
return HttpResponse{200, absl::Cord()};
}(),
response_handler);
}
void set_has_service_account(bool has_service_account) {
has_service_account_ = has_service_account;
}
bool has_service_account_ = false;
};
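// Installs the mock metadata transport as the default HTTP transport and
// clears the cached shared auth provider before and after the suite.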
class GoogleAuthProviderTest : public ::testing::Test {
public:
GoogleAuthTestScope google_auth_test_scope;
static void SetUpTestSuite() {
SetDefaultHttpTransport(mock_transport);
tensorstore::internal_oauth2::ResetSharedGoogleAuthProvider();
}
static void TearDownTestSuite() {
tensorstore::internal_oauth2::ResetSharedGoogleAuthProvider();
SetDefaultHttpTransport(nullptr);
}
static std::shared_ptr<MetadataMockTransport> mock_transport;
};
std::shared_ptr<MetadataMockTransport> GoogleAuthProviderTest::mock_transport =
std::make_shared<MetadataMockTransport>();
TEST_F(GoogleAuthProviderTest, Invalid) {
SetEnv("GCE_METADATA_ROOT", "invalidmetadata.google.internal");
auto auth_provider = GetGoogleAuthProvider();
EXPECT_FALSE(auth_provider.ok());
UnsetEnv("GCE_METADATA_ROOT");
}
TEST_F(GoogleAuthProviderTest, AuthTokenForTesting) {
SetEnv("GOOGLE_AUTH_TOKEN_FOR_TESTING", "abc");
auto auth_provider = GetGoogleAuthProvider();
ASSERT_TRUE(auth_provider.ok()) << auth_provider.status();
{
auto instance =
dynamic_cast<tensorstore::internal_oauth2::FixedTokenAuthProvider*>(
auth_provider->get());
EXPECT_FALSE(instance == nullptr);
}
std::unique_ptr<AuthProvider> auth = std::move(*auth_provider);
auto token = auth->GetToken();
ASSERT_TRUE(token.ok());
EXPECT_EQ("abc", token->token);
}
TEST_F(GoogleAuthProviderTest, GoogleOAuth2AccountCredentialsFromSDKConfig) {
TestData test_data;
test_data.WriteServiceAccountCredentials();
test_data.WriteApplicationDefaultCredentials();
SetEnv("CLOUDSDK_CONFIG", test_data.path().c_str());
auto auth_provider = GetGoogleAuthProvider();
ASSERT_TRUE(auth_provider.ok()) << auth_provider.status();
{
auto instance =
dynamic_cast<tensorstore::internal_oauth2::OAuth2AuthProvider*>(
auth_provider->get());
EXPECT_FALSE(instance == nullptr);
}
}
TEST_F(GoogleAuthProviderTest, GoogleOAuth2AccountCredentials) {
TestData test_data;
SetEnv("GOOGLE_APPLICATION_CREDENTIALS",
test_data.WriteApplicationDefaultCredentials().c_str());
auto auth_provider = GetGoogleAuthProvider();
ASSERT_TRUE(auth_provider.ok()) << auth_provider.status();
{
auto instance =
dynamic_cast<tensorstore::internal_oauth2::OAuth2AuthProvider*>(
auth_provider->get());
EXPECT_FALSE(instance == nullptr);
}
}
TEST_F(GoogleAuthProviderTest, GoogleServiceAccountCredentials) {
TestData test_data;
SetEnv("GOOGLE_APPLICATION_CREDENTIALS",
test_data.WriteServiceAccountCredentials().c_str());
auto auth_provider = GetGoogleAuthProvider();
ASSERT_TRUE(auth_provider.ok()) << auth_provider.status();
{
auto instance = dynamic_cast<
tensorstore::internal_oauth2::GoogleServiceAccountAuthProvider*>(
auth_provider->get());
EXPECT_FALSE(instance == nullptr);
}
}
TEST_F(GoogleAuthProviderTest, GceWithServiceAccount) {
mock_transport->set_has_service_account(true);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto auth_provider, GetGoogleAuthProvider());
{
auto instance =
dynamic_cast<tensorstore::internal_oauth2::GceAuthProvider*>(
auth_provider.get());
EXPECT_FALSE(instance == nullptr);
}
EXPECT_THAT(auth_provider->GetAuthHeader(),
::testing::Optional(std::string("Authorization: Bearer abc")));
}
TEST_F(GoogleAuthProviderTest, GceWithoutServiceAccount) {
mock_transport->set_has_service_account(false);
EXPECT_THAT(GetGoogleAuthProvider(),
tensorstore::MatchesStatus(absl::StatusCode::kNotFound));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/oauth2/google_auth_provider.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/oauth2/google_auth_provider_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
8a5dcfca-2d54-4353-bcdd-af9c476f0366 | cpp | tensorflow/tensorflow | rng_converter_utils | tensorflow/compiler/tf2xla/kernels/rng_converter_utils.cc | tensorflow/compiler/tf2xla/kernels/rng_converter_utils_test.cc | #include "tensorflow/compiler/tf2xla/kernels/rng_converter_utils.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/rng_alg.h"
namespace tensorflow {
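// Maps an XLA RNG algorithm to its TensorFlow counterpart; RNG_DEFAULT and any
// unrecognized value map to RNG_ALG_AUTO_SELECT.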
Algorithm ToTensorflowAlgorithm(xla::RandomAlgorithm alg) {
switch (alg) {
case xla::RandomAlgorithm::RNG_PHILOX:
return RNG_ALG_PHILOX;
case xla::RandomAlgorithm::RNG_THREE_FRY:
return RNG_ALG_THREEFRY;
case xla::RandomAlgorithm::RNG_DEFAULT:
default:
return RNG_ALG_AUTO_SELECT;
}
}
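// XLA CPU and GPU devices default to the Philox algorithm; all other device
// types keep RNG_DEFAULT so the backend can choose.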
xla::RandomAlgorithm DefaultRngAlgForDeviceType(
absl::string_view device_type_string) {
if (device_type_string == DEVICE_GPU_XLA_JIT ||
device_type_string == DEVICE_CPU_XLA_JIT) {
return xla::RandomAlgorithm::RNG_PHILOX;
} else {
return xla::RandomAlgorithm::RNG_DEFAULT;
}
}
} | #include "tensorflow/compiler/tf2xla/kernels/rng_converter_utils.h"
#include <gtest/gtest.h>
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/rng_alg.h"
namespace tensorflow {
namespace {
TEST(RngConverterUtilsTest, DefaultRngForCPUEqualsGPU) {
EXPECT_EQ(DefaultRngAlgForDeviceType(DEVICE_CPU_XLA_JIT),
DefaultRngAlgForDeviceType(DEVICE_GPU_XLA_JIT));
}
TEST(RngConverterUtilsTest, UnknownDeviceIsDefault) {
EXPECT_EQ(DefaultRngAlgForDeviceType("UNKNOWN DEVICE"),
xla::RandomAlgorithm::RNG_DEFAULT);
}
TEST(RngConverterUtilsTest, TensorflowAutoSelects) {
EXPECT_EQ(ToTensorflowAlgorithm(xla::RandomAlgorithm::RNG_DEFAULT),
tensorflow::RNG_ALG_AUTO_SELECT);
}
TEST(RngConverterUtilsTest, ToTensorflow) {
EXPECT_EQ(ToTensorflowAlgorithm(xla::RandomAlgorithm::RNG_PHILOX),
tensorflow::RNG_ALG_PHILOX);
EXPECT_EQ(ToTensorflowAlgorithm(xla::RandomAlgorithm::RNG_THREE_FRY),
tensorflow::RNG_ALG_THREEFRY);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/rng_converter_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/rng_converter_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a5ecc6e9-0c1c-4e01-9180-c293cbfd50be | cpp | tensorflow/tensorflow | reference_util | third_party/xla/xla/reference_util.cc | third_party/xla/xla/reference_util_test.cc | #include "xla/reference_util.h"
#include <array>
#include <cmath>
#include <functional>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/types/span.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/array4d.h"
#include "xla/client/padding.h"
#include "xla/client/xla_builder.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/tsl/lib/math/math_util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
std::unique_ptr<Array2D<double>> ReferenceUtil::Array2DF32ToF64(
const Array2D<float>& input) {
auto result =
std::make_unique<Array2D<double>>(input.height(), input.width());
for (int64_t rowno = 0; rowno < input.height(); ++rowno) {
for (int64_t colno = 0; colno < input.width(); ++colno) {
(*result)(rowno, colno) = input(rowno, colno);
}
}
return result;
}
std::unique_ptr<Array3D<float>> ReferenceUtil::ConvArray3D(
const Array3D<float>& lhs, const Array3D<float>& rhs, int64_t kernel_stride,
Padding padding) {
return ConvArray3DGeneralDimensionsDilated(
lhs, rhs, kernel_stride, padding, 1, 1,
XlaBuilder::CreateDefaultConvDimensionNumbers(1));
}
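// Implements the 1D convolution by appending a size-1 trailing spatial
// dimension to both operands, running the 4D reference convolution, and
// squeezing the result back to 3D.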
std::unique_ptr<Array3D<float>>
ReferenceUtil::ConvArray3DGeneralDimensionsDilated(
const Array3D<float>& lhs, const Array3D<float>& rhs, int64_t kernel_stride,
Padding padding, int64_t lhs_dilation, int64_t rhs_dilation,
const ConvolutionDimensionNumbers& dnums) {
CHECK_EQ(dnums.input_spatial_dimensions_size(), 1);
CHECK_EQ(dnums.kernel_spatial_dimensions_size(), 1);
CHECK_EQ(dnums.output_spatial_dimensions_size(), 1);
Array4D<float> a4dlhs(lhs.n1(), lhs.n2(), lhs.n3(), 1);
a4dlhs.Each([&](absl::Span<const int64_t> indices, float* value_ptr) {
CHECK_EQ(indices[3], 0);
*value_ptr = lhs.operator()(indices[0], indices[1], indices[2]);
});
Array4D<float> a4drhs(rhs.n1(), rhs.n2(), rhs.n3(), 1);
a4drhs.Each([&](absl::Span<const int64_t> indices, float* value_ptr) {
CHECK_EQ(indices[3], 0);
*value_ptr = rhs.operator()(indices[0], indices[1], indices[2]);
});
ConvolutionDimensionNumbers dnums2d = dnums;
dnums2d.add_input_spatial_dimensions(3);
dnums2d.add_kernel_spatial_dimensions(3);
dnums2d.add_output_spatial_dimensions(3);
std::unique_ptr<Array4D<float>> convr4 = ConvArray4DGeneralDimensionsDilated(
a4dlhs, a4drhs, {kernel_stride, 1}, padding, {lhs_dilation, 1},
{rhs_dilation, 1}, dnums2d);
auto convr3 = std::make_unique<Array3D<float>>(
convr4->planes(), convr4->depth(), convr4->height());
convr4->Each([&](absl::Span<const int64_t> indices, float* value_ptr) {
CHECK_EQ(indices[3], 0);
convr3->operator()(indices[0], indices[1], indices[2]) = *value_ptr;
});
return convr3;
}
std::unique_ptr<Array4D<float>> ReferenceUtil::ConvArray4D(
const Array4D<float>& lhs, const Array4D<float>& rhs,
std::pair<int64_t, int64_t> kernel_stride, Padding padding) {
return ConvArray4DGeneralDimensions(
lhs, rhs, kernel_stride, padding,
XlaBuilder::CreateDefaultConvDimensionNumbers());
}
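// Folds the depthwise and pointwise filters into one dense filter and then
// runs a regular convolution over the input.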
std::unique_ptr<Array4D<float>>
ReferenceUtil::SeparableConvArray4D(const Array4D<float>& input,
const Array4D<float>& depthwise_weights,
const Array4D<float>& pointwise_weights,
std::pair<int64_t, int64_t> kernel_stride,
Padding padding) {
const int64_t depth_multiplier = depthwise_weights.planes();
CHECK_EQ(pointwise_weights.depth(), input.depth() * depth_multiplier);
Array4D<float> weights(pointwise_weights.planes(), input.depth(),
depthwise_weights.height(), depthwise_weights.width());
for (int64_t kx = 0; kx < depthwise_weights.width(); ++kx) {
for (int64_t ky = 0; ky < depthwise_weights.height(); ++ky) {
for (int64_t kz = 0; kz < input.depth(); ++kz) {
for (int64_t out = 0; out < pointwise_weights.planes(); ++out) {
float weight = 0.0;
for (int64_t depth = 0; depth < depth_multiplier; ++depth) {
weight +=
depthwise_weights(depth, kz, ky, kx) *
pointwise_weights(out, depth + kz * depth_multiplier, 0, 0);
}
weights(out, kz, ky, kx) = weight;
}
}
}
}
return ConvArray4D(input, weights, kernel_stride, padding);
}
int64_t ReferenceUtil::WindowCount(int64_t unpadded_width,
int64_t window_len,
int64_t stride,
Padding padding) {
if (padding == Padding::kValid) {
return window_util::StridedBound(unpadded_width, window_len, stride);
}
return tsl::MathUtil::CeilOfRatio(unpadded_width, stride);
}
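// Reference 1D reduce-window with explicit padding; window positions that fall
// outside the operand contribute nothing beyond the init value.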
std::unique_ptr<std::vector<float>>
ReferenceUtil::ReduceWindow1DGeneric(
absl::Span<const float> operand, float init,
absl::FunctionRef<float(float, float)> reduce_func,
absl::Span<const int64_t> window, absl::Span<const int64_t> stride,
absl::Span<const std::pair<int64_t, int64_t>> padding) {
CHECK_EQ(window.size(), 1);
CHECK_EQ(stride.size(), 1);
CHECK_EQ(padding.size(), 1);
int64_t padded_width = padding[0].first + operand.size() + padding[0].second;
int64_t stride_amount = stride[0];
int64_t window_size = window[0];
int64_t result_size =
window_util::StridedBound(padded_width, window_size, stride_amount);
int64_t pad_low = padding[0].first;
auto result = std::make_unique<std::vector<float>>(result_size);
for (int64_t i0 = 0; i0 < result_size; ++i0) {
int64_t i0_base = i0 * stride_amount - pad_low;
float val = init;
for (int64_t i0_win = 0; i0_win < window_size; ++i0_win) {
if (i0_base + i0_win >= 0 && i0_base + i0_win < operand.size()) {
val = reduce_func(val, operand[i0_base + i0_win]);
}
}
(*result)[i0] = val;
}
return result;
}
std::unique_ptr<Array4D<float>>
ReferenceUtil::ReduceWindow4DGeneric(
const Array4D<float>& operand, float init,
absl::FunctionRef<float(float, float)> reduce_func,
absl::Span<const int64_t> window, absl::Span<const int64_t> stride,
Padding padding) {
std::vector<int64_t> dim_lengths{operand.n1(), operand.n2(), operand.n3(),
operand.n4()};
return ReduceWindow4DGeneric(
operand, init, reduce_func, window, stride,
xla::MakePadding(dim_lengths, window, stride, padding));
}
std::unique_ptr<Array4D<float>>
ReferenceUtil::ReduceWindow4DGeneric(
const Array4D<float>& operand, float init,
absl::FunctionRef<float(float, float)> reduce_func,
absl::Span<const int64_t> window, absl::Span<const int64_t> stride,
absl::Span<const std::pair<int64_t, int64_t>> padding) {
std::vector<int64_t> dim_lengths{operand.n1(), operand.n2(), operand.n3(),
operand.n4()};
std::vector<int64_t> window_counts(window.size(), 0);
std::vector<int64_t> pad_low(window.size(), 0);
for (int64_t i = 0; i < window.size(); ++i) {
int64_t padded_width =
padding[i].first + dim_lengths[i] + padding[i].second;
window_counts[i] =
window_util::StridedBound(padded_width, window[i], stride[i]);
pad_low[i] = padding[i].first;
}
auto result = std::make_unique<Array4D<float>>(
window_counts[0], window_counts[1], window_counts[2], window_counts[3]);
for (int64_t i0 = 0; i0 < window_counts[0]; ++i0) {
for (int64_t i1 = 0; i1 < window_counts[1]; ++i1) {
for (int64_t i2 = 0; i2 < window_counts[2]; ++i2) {
for (int64_t i3 = 0; i3 < window_counts[3]; ++i3) {
int64_t i0_base = i0 * stride[0] - pad_low[0];
int64_t i1_base = i1 * stride[1] - pad_low[1];
int64_t i2_base = i2 * stride[2] - pad_low[2];
int64_t i3_base = i3 * stride[3] - pad_low[3];
float val = init;
for (int64_t i0_win = 0; i0_win < window[0]; ++i0_win) {
for (int64_t i1_win = 0; i1_win < window[1]; ++i1_win) {
for (int64_t i2_win = 0; i2_win < window[2]; ++i2_win) {
for (int64_t i3_win = 0; i3_win < window[3]; ++i3_win) {
if (i0_base + i0_win >= 0 && i1_base + i1_win >= 0 &&
i2_base + i2_win >= 0 && i3_base + i3_win >= 0 &&
i0_base + i0_win < operand.n1() &&
i1_base + i1_win < operand.n2() &&
i2_base + i2_win < operand.n3() &&
i3_base + i3_win < operand.n4()) {
val = reduce_func(
val, operand(i0_base + i0_win, i1_base + i1_win,
i2_base + i2_win, i3_base + i3_win));
}
}
}
}
}
(*result)(i0, i1, i2, i3) = val;
}
}
}
}
return result;
}
std::unique_ptr<Array4D<float>> ReferenceUtil::ReduceWindow4DAdd(
const Array4D<float>& operand, float init, absl::Span<const int64_t> window,
absl::Span<const int64_t> stride, Padding padding) {
const auto add_reduce = [](float arg1, float arg2) { return arg1 + arg2; };
return ReduceWindow4DGeneric(operand, init, add_reduce, window, stride,
padding);
}
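// Reference batch normalization: ((input - mean) / sqrt(var + epsilon)) *
// scale + offset, applied element-wise.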
std::unique_ptr<Array4D<float>> ReferenceUtil::BatchNorm4D(
const Array4D<float>& input, const Array4D<float>& mean,
const Array4D<float>& var, const Array4D<float>& scale,
const Array4D<float>& offset, float epsilon) {
auto normalized =
*MapArray4D(input, mean, [](float a, float b) { return a - b; });
normalized = *MapArray4D(normalized, var, [&](float a, float b) {
return a / std::sqrt(b + epsilon);
});
normalized =
*MapArray4D(normalized, scale, [](float a, float b) { return a * b; });
return MapArray4D(normalized, offset, [](float a, float b) { return a + b; });
}
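// Reference select-and-scatter with a greater-than select and addition
// scatter: each source value is accumulated at the location of the maximum
// operand element within its window.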
std::unique_ptr<Array4D<float>>
ReferenceUtil::SelectAndScatter4DGePlus(const Array4D<float>& operand,
const Array4D<float>& source,
float init,
absl::Span<const int64_t> window,
absl::Span<const int64_t> stride,
bool same_padding) {
Padding padding = same_padding ? Padding::kSame : Padding::kValid;
auto result = std::make_unique<Array4D<float>>(operand.n1(), operand.n2(),
operand.n3(), operand.n4());
std::vector<int64_t> dim_lengths{operand.n1(), operand.n2(), operand.n3(),
operand.n4()};
auto padding_both = xla::MakePadding(dim_lengths, window, stride, padding);
result->Fill(init);
std::vector<int64_t> window_counts(window.size(), 0);
std::vector<int64_t> pad_low(window.size(), 0);
for (int64_t i = 0; i < window.size(); ++i) {
window_counts[i] =
WindowCount(dim_lengths[i], window[i], stride[i], padding);
pad_low[i] = padding_both[i].first;
}
CHECK_EQ(window_counts[0], source.n1());
CHECK_EQ(window_counts[1], source.n2());
CHECK_EQ(window_counts[2], source.n3());
CHECK_EQ(window_counts[3], source.n4());
for (int64_t i0 = 0; i0 < window_counts[0]; ++i0) {
for (int64_t i1 = 0; i1 < window_counts[1]; ++i1) {
for (int64_t i2 = 0; i2 < window_counts[2]; ++i2) {
for (int64_t i3 = 0; i3 < window_counts[3]; ++i3) {
int64_t i0_base = i0 * stride[0] - pad_low[0];
int64_t i1_base = i1 * stride[1] - pad_low[1];
int64_t i2_base = i2 * stride[2] - pad_low[2];
int64_t i3_base = i3 * stride[3] - pad_low[3];
int64_t scatter_0 = (i0_base >= 0) ? i0_base : 0;
int64_t scatter_1 = (i1_base >= 0) ? i1_base : 0;
int64_t scatter_2 = (i2_base >= 0) ? i2_base : 0;
int64_t scatter_3 = (i3_base >= 0) ? i3_base : 0;
float val = operand(scatter_0, scatter_1, scatter_2, scatter_3);
for (int64_t i0_win = 0; i0_win < window[0]; ++i0_win) {
for (int64_t i1_win = 0; i1_win < window[1]; ++i1_win) {
for (int64_t i2_win = 0; i2_win < window[2]; ++i2_win) {
for (int64_t i3_win = 0; i3_win < window[3]; ++i3_win) {
if (i0_base + i0_win >= 0 && i1_base + i1_win >= 0 &&
i2_base + i2_win >= 0 && i3_base + i3_win >= 0 &&
i0_base + i0_win < operand.n1() &&
i1_base + i1_win < operand.n2() &&
i2_base + i2_win < operand.n3() &&
i3_base + i3_win < operand.n4()) {
float tmp = operand(i0_base + i0_win, i1_base + i1_win,
i2_base + i2_win, i3_base + i3_win);
if (tmp > val) {
val = tmp;
scatter_0 = i0_base + i0_win;
scatter_1 = i1_base + i1_win;
scatter_2 = i2_base + i2_win;
scatter_3 = i3_base + i3_win;
}
}
}
}
}
}
(*result)(scatter_0, scatter_1, scatter_2, scatter_3) +=
source(i0, i1, i2, i3);
}
}
}
}
return result;
}
std::unique_ptr<Array4D<float>>
ReferenceUtil::ConvArray4DGeneralDimensions(
const Array4D<float>& lhs, const Array4D<float>& rhs,
std::pair<int64_t, int64_t> kernel_stride, Padding padding,
ConvolutionDimensionNumbers dimension_numbers) {
return ConvArray4DGeneralDimensionsDilated(lhs, rhs, kernel_stride, padding,
{1, 1}, {1, 1},
std::move(dimension_numbers));
}
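// Computes the reference convolution by building a small HLO module (two
// constant operands plus a convolve instruction) and running it through
// HloEvaluator.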
std::unique_ptr<Array4D<float>>
ReferenceUtil::ConvArray4DGeneralDimensionsDilated(
const Array4D<float>& lhs, const Array4D<float>& rhs,
std::pair<int64_t, int64_t> kernel_stride, Padding padding,
std::pair<int64_t, int64_t> lhs_dilation,
std::pair<int64_t, int64_t> rhs_dilation,
ConvolutionDimensionNumbers dnums) {
HloComputation::Builder b("ConvArray4DGeneralDimensionDilated");
auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(lhs);
auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(rhs);
std::array<int64_t, 2> ordered_kernel_strides;
std::array<int64_t, 2> ordered_input_dimensions;
std::array<int64_t, 2> ordered_kernel_dimensions;
if (dnums.kernel_spatial_dimensions(0) > dnums.kernel_spatial_dimensions(1)) {
ordered_kernel_strides[0] = kernel_stride.second;
ordered_kernel_strides[1] = kernel_stride.first;
} else {
ordered_kernel_strides[0] = kernel_stride.first;
ordered_kernel_strides[1] = kernel_stride.second;
}
ordered_input_dimensions[0] =
lhs_literal.shape().dimensions(dnums.input_spatial_dimensions(0));
ordered_input_dimensions[1] =
lhs_literal.shape().dimensions(dnums.input_spatial_dimensions(1));
ordered_kernel_dimensions[0] =
rhs_literal.shape().dimensions(dnums.kernel_spatial_dimensions(0));
ordered_kernel_dimensions[1] =
rhs_literal.shape().dimensions(dnums.kernel_spatial_dimensions(1));
std::vector<std::pair<int64_t, int64_t>> paddings =
MakePadding(ordered_input_dimensions, ordered_kernel_dimensions,
ordered_kernel_strides, padding);
CHECK_EQ(paddings.size(), 2);
Window window;
WindowDimension dim;
dim.set_size(
rhs_literal.shape().dimensions(dnums.kernel_spatial_dimensions(0)));
dim.set_stride(kernel_stride.first);
dim.set_padding_low(paddings[0].first);
dim.set_padding_high(paddings[0].second);
dim.set_window_dilation(rhs_dilation.first);
dim.set_base_dilation(lhs_dilation.first);
*window.add_dimensions() = dim;
WindowDimension dim2;
dim2.set_size(
rhs_literal.shape().dimensions(dnums.kernel_spatial_dimensions(1)));
dim2.set_stride(kernel_stride.second);
dim2.set_padding_low(paddings[1].first);
dim2.set_padding_high(paddings[1].second);
dim2.set_window_dilation(rhs_dilation.second);
dim2.set_base_dilation(lhs_dilation.second);
*window.add_dimensions() = dim2;
const Shape shape =
ShapeInference::InferConvolveShape(
lhs_literal.shape(), rhs_literal.shape(),
          /*feature_group_count=*/1, /*batch_group_count=*/1, window, dnums,
          /*preferred_element_type=*/std::nullopt)
.value();
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
2, PrecisionConfig::DEFAULT);
b.AddInstruction(HloInstruction::CreateConvolve(
      shape, lhs_instruction, rhs_instruction, /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums, precision_config));
HloModuleConfig config;
HloModule module("ReferenceUtil", config);
auto computation = module.AddEntryComputation(b.Build());
HloEvaluator evaluator;
Literal result_literal = evaluator.Evaluate(*computation, {}).value();
CHECK_EQ(result_literal.shape().rank(), 4);
auto result =
std::make_unique<Array4D<float>>(result_literal.shape().dimensions(0),
result_literal.shape().dimensions(1),
result_literal.shape().dimensions(2),
result_literal.shape().dimensions(3));
result->Each([&](absl::Span<const int64_t> indices, float* value) {
*value = result_literal.Get<float>(indices);
});
return result;
}
std::unique_ptr<std::vector<float>>
ReferenceUtil::ReduceToColArray2D(
const Array2D<float>& matrix, float init,
absl::FunctionRef<float(float, float)> reduce_function) {
int64_t rows = matrix.height();
int64_t cols = matrix.width();
auto result = std::make_unique<std::vector<float>>();
for (int64_t i = 0; i < rows; ++i) {
float acc = init;
for (int64_t j = 0; j < cols; ++j) {
acc = reduce_function(acc, matrix(i, j));
}
result->push_back(acc);
}
return result;
}
std::unique_ptr<std::vector<float>>
ReferenceUtil::ReduceToRowArray2D(
const Array2D<float>& matrix, float init,
absl::FunctionRef<float(float, float)> reduce_function) {
int64_t rows = matrix.height();
int64_t cols = matrix.width();
auto result = std::make_unique<std::vector<float>>();
for (int64_t i = 0; i < cols; ++i) {
float acc = init;
for (int64_t j = 0; j < rows; ++j) {
acc = reduce_function(acc, matrix(j, i));
}
result->push_back(acc);
}
return result;
}
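// Reduces a 4D array over the three dimensions listed in dims (all distinct),
// producing a vector indexed by the remaining dimension.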
std::vector<float> ReferenceUtil::Reduce4DTo1D(
const Array4D<float>& array, float init, absl::Span<const int64_t> dims,
absl::FunctionRef<float(float, float)> reduce_function) {
std::vector<float> result;
CHECK_EQ(dims.size(), 3);
const absl::flat_hash_set<int64_t> dim_set(dims.begin(), dims.end());
CHECK_EQ(dim_set.size(), 3);
for (int64_t a0 = 0; a0 == 0 || (!dim_set.contains(0) && a0 < array.n1());
++a0) {
for (int64_t a1 = 0; a1 == 0 || (!dim_set.contains(1) && a1 < array.n2());
++a1) {
for (int64_t a2 = 0; a2 == 0 || (!dim_set.contains(2) && a2 < array.n3());
++a2) {
for (int64_t a3 = 0;
a3 == 0 || (!dim_set.contains(3) && a3 < array.n4()); ++a3) {
float accumulator = init;
for (int64_t i0 = 0;
i0 == 0 || (dim_set.contains(0) && i0 < array.n1()); ++i0) {
for (int64_t i1 = 0;
i1 == 0 || (dim_set.contains(1) && i1 < array.n2()); ++i1) {
for (int64_t i2 = 0;
i2 == 0 || (dim_set.contains(2) && i2 < array.n3()); ++i2) {
for (int64_t i3 = 0;
i3 == 0 || (dim_set.contains(3) && i3 < array.n4());
++i3) {
if (array.n1() > 0 && array.n2() > 0 && array.n3() > 0 &&
array.n4() > 0) {
accumulator = reduce_function(
accumulator, array(a0 + i0, a1 + i1, a2 + i2, a3 + i3));
}
}
}
}
}
result.push_back(accumulator);
}
}
}
}
return result;
}
std::unique_ptr<Array4D<float>> ReferenceUtil::Broadcast1DTo4D(
const std::vector<float>& array, const std::vector<int64_t>& bounds,
int64_t broadcast_from_dim) {
auto result = std::make_unique<Array4D<float>>(bounds[0], bounds[1],
bounds[2], bounds[3]);
for (int64_t i = 0; i < result->n1(); ++i) {
for (int64_t j = 0; j < result->n2(); ++j) {
for (int64_t k = 0; k < result->n3(); ++k) {
for (int64_t l = 0; l < result->n4(); ++l) {
switch (broadcast_from_dim) {
case 0:
(*result)(i, j, k, l) = array[i];
break;
case 1:
(*result)(i, j, k, l) = array[j];
break;
case 2:
(*result)(i, j, k, l) = array[k];
break;
case 3:
(*result)(i, j, k, l) = array[l];
break;
default:
break;
}
}
}
}
}
return result;
}
std::unique_ptr<Array2D<float>> ReferenceUtil::Reduce3DTo2D(
const Array3D<float>& array, float init, absl::Span<const int64_t> dims,
absl::FunctionRef<float(float, float)> reduce_function) {
CHECK_EQ(dims.size(), 1);
int64_t rows = dims[0] == 0 ? array.n2() : array.n1();
int64_t cols = dims[0] == 2 ? array.n2() : array.n3();
auto result = std::make_unique<Array2D<float>>(rows, cols);
result->Fill(init);
for (int i0 = 0; i0 < array.n1(); ++i0) {
for (int i1 = 0; i1 < array.n2(); ++i1) {
for (int i2 = 0; i2 < array.n3(); ++i2) {
int64_t row = dims[0] == 0 ? i1 : i0;
int64_t col = dims[0] == 2 ? i1 : i2;
(*result)(row, col) =
reduce_function((*result)(row, col), array(i0, i1, i2));
}
}
}
return result;
}
std::unique_ptr<Array2D<float>> ReferenceUtil::MapArray2D(
const Array2D<float>& matrix,
absl::FunctionRef<float(float)> map_function) {
int64_t rows = matrix.height();
int64_t cols = matrix.width();
auto result = std::make_unique<Array2D<float>>(rows, cols);
for (int64_t i = 0; i < rows; ++i) {
for (int64_t j = 0; j < cols; ++j) {
(*result)(i, j) = map_function(matrix(i, j));
}
}
return result;
}
std::unique_ptr<Array2D<float>> ReferenceUtil::MapArray2D(
const Array2D<float>& lhs, const Array2D<float>& rhs,
absl::FunctionRef<float(float, float)> map_function) {
CHECK_EQ(lhs.height(), rhs.height());
CHECK_EQ(lhs.width(), rhs.width());
int64_t rows = lhs.height();
int64_t cols = rhs.width();
auto result = std::make_unique<Array2D<float>>(rows, cols);
for (int64_t i = 0; i < rows; ++i) {
for (int64_t j = 0; j < cols; ++j) {
(*result)(i, j) = map_function(lhs(i, j), rhs(i, j));
}
}
return result;
}
std::unique_ptr<Array3D<float>> ReferenceUtil::MapArray3D(
const Array3D<float>& array, absl::FunctionRef<float(float)> map_function) {
int64_t n1 = array.n1();
int64_t n2 = array.n2();
int64_t n3 = array.n3();
auto result = std::make_unique<Array3D<float>>(n1, n2, n3);
for (int64_t i = 0; i < n1; ++i) {
for (int64_t j = 0; j < n2; ++j) {
for (int64_t k = 0; k < n3; ++k) {
(*result)(i, j, k) = map_function(array(i, j, k));
}
}
}
return result;
}
std::unique_ptr<Array3D<float>> ReferenceUtil::MapArray3D(
const Array3D<float>& lhs, const Array3D<float>& rhs,
absl::FunctionRef<float(float, float)> map_function) {
CHECK_EQ(lhs.n1(), rhs.n1());
CHECK_EQ(lhs.n2(), rhs.n2());
CHECK_EQ(lhs.n3(), rhs.n3());
int64_t n1 = lhs.n1();
int64_t n2 = rhs.n2();
int64_t n3 = rhs.n3();
auto result = std::make_unique<Array3D<float>>(n1, n2, n3);
for (int64_t i = 0; i < n1; ++i) {
for (int64_t j = 0; j < n2; ++j) {
for (int64_t k = 0; k < n3; ++k) {
(*result)(i, j, k) = map_function(lhs(i, j, k), rhs(i, j, k));
}
}
}
return result;
}
std::unique_ptr<Array2D<float>> ReferenceUtil::MapWithIndexArray2D(
const Array2D<float>& matrix,
absl::FunctionRef<float(float, int64_t, int64_t)> map_function) {
int64_t rows = matrix.height();
int64_t cols = matrix.width();
auto result = std::make_unique<Array2D<float>>(rows, cols);
for (int64_t i = 0; i < rows; ++i) {
for (int64_t j = 0; j < cols; ++j) {
(*result)(i, j) = map_function(matrix(i, j), i, j);
}
}
return result;
}
} | #include "xla/reference_util.h"
#include <cmath>
#include <memory>
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/array4d.h"
#include "xla/client/padding.h"
#include "xla/error_spec.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/test.h"
#include "xla/tests/literal_test_util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
class ReferenceUtilTest : public ::testing::Test {
protected:
ReferenceUtilTest() {
matrix_ = std::make_unique<Array2D<float>>(rows_, cols_);
for (int64_t i = 0; i < rows_; ++i) {
for (int64_t j = 0; j < cols_; ++j) {
(*matrix_)(i, j) = i * cols_ + j + 1;
}
}
}
const int64_t rows_ = 2;
const int64_t cols_ = 3;
std::unique_ptr<Array2D<float>> matrix_;
};
TEST_F(ReferenceUtilTest, TransposeArray2D) {
auto result = ReferenceUtil::TransposeArray2D(*matrix_);
auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result);
LiteralTestUtil::ExpectR2Near<float>({{1.f, 4.f}, {2.f, 5.f}, {3.f, 6.f}},
actual_literal, ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, MatmulArray2D) {
Array2D<float> rhs({
{7.f, 8.f},
{9.f, 10.f},
{11.f, 12.f},
});
auto result = ReferenceUtil::MatmulArray2D(*matrix_, rhs);
auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result);
LiteralTestUtil::ExpectR2Near<float>({{58.f, 64.f}, {139.f, 154.f}},
actual_literal, ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, ReduceToColArray2D) {
auto add = [](float lhs, float rhs) { return lhs + rhs; };
auto result = ReferenceUtil::ReduceToColArray2D(*matrix_, 0.0f, add);
auto actual_literal = LiteralUtil::CreateR1<float>(*result);
LiteralTestUtil::ExpectR1Near<float>({6.f, 15.f}, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, ReduceToRowArray2D) {
auto add = [](float lhs, float rhs) { return lhs + rhs; };
auto result = ReferenceUtil::ReduceToRowArray2D(*matrix_, 0.0f, add);
auto actual_literal = LiteralUtil::CreateR1<float>(*result);
LiteralTestUtil::ExpectR1Near<float>({5.f, 7.f, 9.f}, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, Array2DF32ToF64Test) {
auto result = ReferenceUtil::Array2DF32ToF64(*matrix_);
ASSERT_EQ(result->height(), matrix_->height());
ASSERT_EQ(result->width(), matrix_->width());
for (int64_t rowno = 0; rowno < matrix_->height(); ++rowno) {
for (int64_t colno = 0; colno < matrix_->width(); ++colno) {
EXPECT_EQ(static_cast<double>((*matrix_)(rowno, colno)),
(*result)(rowno, colno));
}
}
}
TEST_F(ReferenceUtilTest, Reduce4Dto1DZeroSizedArray) {
auto result = LiteralUtil::CreateR1<float>(ReferenceUtil::Reduce4DTo1D(
Array4D<float>(1, 0, 1, 1), 0, {0, 1, 2},
[](float a, float b) { return a + b; }));
LiteralTestUtil::ExpectR1Equal<float>({0}, result);
}
TEST_F(ReferenceUtilTest, MapArray2D) {
auto identity = [](float value) { return std::log(std::exp(value)); };
auto result = ReferenceUtil::MapArray2D(*matrix_, identity);
auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result);
LiteralTestUtil::ExpectR2NearArray2D(*matrix_, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, MapArray3D) {
auto identity = [](float value) { return std::log(std::exp(value)); };
Array3D<float> input(2, 3, 4);
input.FillIota(0);
auto result = ReferenceUtil::MapArray3D(input, identity);
auto actual_literal = LiteralUtil::CreateR3FromArray3D(*result);
LiteralTestUtil::ExpectR3NearArray3D(input, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, MapWithIndexArray2D) {
auto add_index = [](float value, int64_t row, int64_t col) {
return value + row + col;
};
auto result = ReferenceUtil::MapWithIndexArray2D(*matrix_, add_index);
auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result);
LiteralTestUtil::ExpectR2Near<float>({{1.f, 3.f, 5.f}, {5.f, 7.f, 9.f}},
actual_literal, ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, MapArray4D) {
  auto input = std::make_unique<Array4D<float>>(/*planes=*/2, /*depth=*/3,
                                                /*height=*/4, /*width=*/5);
input->FillWithMultiples(1.0f);
auto multiply_by_two = [](float value) { return 2 * value; };
auto result = ReferenceUtil::MapArray4D(*input, multiply_by_two);
auto actual_literal = LiteralUtil::CreateR4FromArray4D(*result);
Array4D<float> expected(2, 3, 4, 5);
expected.FillWithMultiples(2.0f);
LiteralTestUtil::ExpectR4NearArray4D(expected, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, MapWithIndexArray4D) {
  auto input = std::make_unique<Array4D<float>>(/*planes=*/2, /*depth=*/3,
                                                /*height=*/4, /*width=*/5);
input->FillWithMultiples(1.0f);
auto subtract_index = [](float value, int64_t plane, int64_t depth,
int64_t height, int64_t width) {
return value - (3 * 4 * 5 * plane + 4 * 5 * depth + 5 * height + width);
};
auto result = ReferenceUtil::MapWithIndexArray4D(*input, subtract_index);
auto actual_literal = LiteralUtil::CreateR4FromArray4D(*result);
Array4D<float> expected(2, 3, 4, 5);
expected.Fill(0.0f);
LiteralTestUtil::ExpectR4NearArray4D(expected, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, SliceArray2D) {
auto result = ReferenceUtil::Slice2D(*matrix_, {{0, 0}}, {{2, 2}}, {{1, 1}});
auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result);
LiteralTestUtil::ExpectR2Near<float>({{1.f, 2.f}, {4.f, 5.f}}, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, SliceStridedArray2D) {
auto result = ReferenceUtil::Slice2D(*matrix_, {{0, 0}}, {{2, 3}}, {{1, 2}});
auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result);
LiteralTestUtil::ExpectR2Near<float>({{1.f, 3.f}, {4.f, 6.f}}, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, SliceArray3D) {
Array3D<float> input(2, 3, 4);
input.FillIota(0);
auto result =
ReferenceUtil::Slice3D(input, {{0, 0, 0}}, {{2, 2, 2}}, {{1, 1, 1}});
auto actual_literal = LiteralUtil::CreateR3FromArray3D(*result);
LiteralTestUtil::ExpectR3Near<float>(
{{{0.f, 1.f}, {4.f, 5.f}}, {{12.f, 13.f}, {16.f, 17.f}}}, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, SliceStridedArray3D) {
Array3D<float> input(2, 3, 4);
input.FillIota(0);
auto result =
ReferenceUtil::Slice3D(input, {{0, 0, 0}}, {{2, 3, 4}}, {{1, 2, 2}});
auto actual_literal = LiteralUtil::CreateR3FromArray3D(*result);
LiteralTestUtil::ExpectR3Near<float>(
{{{0.f, 2.f}, {8.f, 10.f}}, {{12.f, 14.f}, {20.f, 22.f}}}, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, SliceArray4D) {
Array4D<float> input(2, 3, 4, 5);
input.FillIota(0);
auto result = ReferenceUtil::Slice4D(input, {{1, 0, 0, 0}}, {{2, 2, 2, 2}},
{{1, 1, 1, 1}});
auto actual_literal = LiteralUtil::CreateR4FromArray4D(*result);
LiteralTestUtil::ExpectR4Near<float>(
{{{{60.f, 61.f}, {65.f, 66.f}}, {{80.f, 81.f}, {85.f, 86.f}}}},
actual_literal, ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, SliceStridedArray4D) {
Array4D<float> input(2, 3, 4, 5);
input.FillIota(0);
auto result = ReferenceUtil::Slice4D(input, {{1, 0, 0, 0}}, {{2, 3, 4, 5}},
{{1, 2, 2, 2}});
auto actual_literal = LiteralUtil::CreateR4FromArray4D(*result);
LiteralTestUtil::ExpectR4Near<float>(
{{{{60.f, 62.f, 64.f}, {70.f, 72.f, 74.f}},
{{100.f, 102.f, 104.f}, {110.f, 112.f, 114.f}}}},
actual_literal, ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, ConvArray3DWithSamePadding) {
Array3D<float> input = {{{1, 2, 3, 4}}};
Array3D<float> weights = {{{5, 6}}};
std::unique_ptr<Array3D<float>> actual =
ReferenceUtil::ConvArray3D(input, weights, 1, Padding::kSame);
Array3D<float> expected = {{{17, 28, 39, 20}}};
auto actual_literal = LiteralUtil::CreateR3FromArray3D(*actual);
LiteralTestUtil::ExpectR3NearArray3D<float>(expected, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, ConvArray3DWithValidPadding) {
Array3D<float> input = {{{1, 2, 3, 4}}};
Array3D<float> weights = {{{5, 6}}};
std::unique_ptr<Array3D<float>> actual =
ReferenceUtil::ConvArray3D(input, weights, 1, Padding::kValid);
Array3D<float> expected = {{{17, 28, 39}}};
auto actual_literal = LiteralUtil::CreateR3FromArray3D(*actual);
LiteralTestUtil::ExpectR3NearArray3D<float>(expected, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, ConvWithSamePadding) {
Array4D<float> input(1, 1, 4, 4);
input.FillWithYX(Array2D<float>({
{1, 2, 3, 4 },
{5, 6, 7, 8 },
{9, 10, 11, 12},
{13, 14, 15, 16},
}));
Array4D<float> weights(1, 1, 2, 2);
weights.FillWithYX(Array2D<float>({
{5, 6},
{7, 8},
}));
std::unique_ptr<Array4D<float>> actual =
ReferenceUtil::ConvArray4D(input, weights, {1, 1}, Padding::kSame);
Array4D<float> expected(1, 1, 4, 4);
expected.FillWithYX(Array2D<float>({
{100, 126, 152, 76},
{204, 230, 256, 124},
{308, 334, 360, 172},
{149, 160, 171, 80},
}));
auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual);
LiteralTestUtil::ExpectR4NearArray4D<float>(expected, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, ConvWithValidPadding) {
Array4D<float> input(1, 1, 4, 4);
input.FillWithYX(Array2D<float>({
{1, 2, 3, 4 },
{5, 6, 7, 8 },
{9, 10, 11, 12},
{13, 14, 15, 16},
}));
Array4D<float> weights(1, 1, 2, 2);
weights.FillWithYX(Array2D<float>({
{5, 6},
{7, 8},
}));
std::unique_ptr<Array4D<float>> actual =
ReferenceUtil::ConvArray4D(input, weights, {1, 1}, Padding::kValid);
Array4D<float> expected(1, 1, 3, 3);
expected.FillWithYX(Array2D<float>({
{1*5+2*6+5*7+6*8, 126, 152},
{204, 230, 256},
{308, 334, 11*5+12*6+15*7+16*8},
}));
auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual);
LiteralTestUtil::ExpectR4NearArray4D<float>(expected, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, ConvGeneralDimensionsWithSamePadding) {
Array4D<float> input({
{{{1, 2, 3, 4}},
{{5, 6, 7, 8}},
{{9, 10, 11, 12}}},
{{{13, 14, 15, 16}},
{{17, 18, 19, 20}},
{{21, 22, 23, 24}}}
});
Array4D<float> weight({{
{{1, 2, 3},
{4, 5, 6}},
{{7, 8, 9},
{10, 11, 12}},
{{13, 14, 15},
{16, 17, 18}}
}});
ConvolutionDimensionNumbers dimension_numbers;
dimension_numbers.set_input_batch_dimension(2);
dimension_numbers.set_input_feature_dimension(0);
dimension_numbers.set_output_batch_dimension(2);
dimension_numbers.set_output_feature_dimension(0);
dimension_numbers.add_input_spatial_dimensions(1);
dimension_numbers.add_output_spatial_dimensions(1);
dimension_numbers.add_input_spatial_dimensions(3);
dimension_numbers.add_output_spatial_dimensions(3);
dimension_numbers.set_kernel_output_feature_dimension(0);
dimension_numbers.set_kernel_input_feature_dimension(2);
dimension_numbers.add_kernel_spatial_dimensions(1);
dimension_numbers.add_kernel_spatial_dimensions(3);
std::unique_ptr<Array4D<float>> actual =
ReferenceUtil::ConvArray4DGeneralDimensions(
input, weight, {1, 1}, Padding::kSame, dimension_numbers);
Array4D<float> expected({{
{{1110, 1688, 1838, 1226}},
{{1683, 2514, 2685, 1761}},
{{878, 1280, 1358, 866}}
}});
auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual);
LiteralTestUtil::ExpectR4NearArray4D<float>(expected, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, ConvGeneralDimensionsWithValidPadding) {
Array4D<float> input({
{{{1, 2, 3, 4}},
{{5, 6, 7, 8}},
{{9, 10, 11, 12}}},
{{{13, 14, 15, 16}},
{{17, 18, 19, 20}},
{{21, 22, 23, 24}}}
});
Array4D<float> weight({{
{{1, 7, 13},
{4, 10, 16}},
{{2, 8, 14},
{5, 11, 17}},
{{3, 9, 15},
{6, 12, 18}}
}});
ConvolutionDimensionNumbers dimension_numbers;
dimension_numbers.set_input_batch_dimension(2);
dimension_numbers.set_input_feature_dimension(0);
dimension_numbers.set_output_batch_dimension(2);
dimension_numbers.set_output_feature_dimension(0);
dimension_numbers.add_input_spatial_dimensions(1);
dimension_numbers.add_output_spatial_dimensions(1);
dimension_numbers.add_input_spatial_dimensions(3);
dimension_numbers.add_output_spatial_dimensions(3);
dimension_numbers.set_kernel_output_feature_dimension(0);
dimension_numbers.set_kernel_input_feature_dimension(2);
dimension_numbers.add_kernel_spatial_dimensions(3);
dimension_numbers.add_kernel_spatial_dimensions(1);
std::unique_ptr<Array4D<float>> actual =
ReferenceUtil::ConvArray4DGeneralDimensions(
input, weight, {1, 1}, Padding::kValid, dimension_numbers);
Array4D<float> expected({{{{2514, 2685}}}});
auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual);
LiteralTestUtil::ExpectR4NearArray4D<float>(expected, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, ApplyElementwise2D) {
Array2D<float> a({{1, 2}, {3, 4}});
Array2D<float> b({{10, 20}, {30, 40}});
Array2D<float> c({{100, 200}, {300, 400}});
auto actual = ReferenceUtil::ApplyElementwise2D(
[](float x, float y, float z) { return 100 * x + 10 * y + z; }, a, b, c);
auto actual_literal = LiteralUtil::CreateR2FromArray2D(*actual);
LiteralTestUtil::ExpectR2Near({{300.f, 600.f}, {900.f, 1200.f}},
actual_literal, ErrorSpec(0.0001));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/reference_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/reference_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d825912f-d5a7-4c16-9580-be760349350d | cpp | tensorflow/tensorflow | sample_stable_delegate | tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.cc | tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_test.cc | #include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.h"
#include <memory>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/utils/simple_opaque_delegate.h"
namespace tflite {
namespace example {
namespace {
class SampleStableDelegateKernel : public SimpleOpaqueDelegateKernelInterface {
bool IsExternalTensor(const TfLiteOpaqueTensor* opaque_tensor) const {
return external_tensors_.count(opaque_tensor) != 0;
}
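  // Tensors used only as inputs or only as outputs of the delegated nodes
  // cross the partition boundary and are treated as external; all others are
  // internal to the delegate.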
void DeriveExternalTensors() {
for (const TfLiteOpaqueTensor* tensor : node_input_tensors_set_) {
if (node_output_tensors_set_.count(tensor) == 0) {
external_tensors_.insert(tensor);
}
}
for (const TfLiteOpaqueTensor* tensor : node_output_tensors_set_) {
if (node_input_tensors_set_.count(tensor) == 0) {
external_tensors_.insert(tensor);
}
}
}
public:
TfLiteStatus Init(TfLiteOpaqueContext* context,
const TfLiteOpaqueDelegateParams* params) override {
if (params->delegate == nullptr) return kTfLiteDelegateError;
context_ = context;
builtin_code_.resize(params->nodes_to_replace->size);
node_input_tensors_.resize(params->nodes_to_replace->size);
node_output_tensors_.resize(params->nodes_to_replace->size);
for (int i = 0; i < params->nodes_to_replace->size; ++i) {
const int node_index = params->nodes_to_replace->data[i];
TfLiteOpaqueNode* delegated_node = nullptr;
TfLiteOperator* delegated_node_registration = nullptr;
TfLiteOpaqueContextGetNodeAndRegistration(
context, node_index, &delegated_node, &delegated_node_registration);
auto input_tensor1 = TfLiteOpaqueNodeGetInput(context, delegated_node, 0);
node_input_tensors_[i].push_back(input_tensor1);
node_input_tensors_set_.insert(input_tensor1);
auto input_tensor2 = TfLiteOpaqueNodeGetInput(context, delegated_node, 1);
node_input_tensors_[i].push_back(input_tensor2);
node_input_tensors_set_.insert(input_tensor2);
auto output_tensor =
TfLiteOpaqueNodeGetOutput(context, delegated_node, 0);
node_output_tensors_[i] = output_tensor;
node_output_tensors_set_.insert(output_tensor);
builtin_code_[i] =
TfLiteOperatorGetBuiltInCode(delegated_node_registration);
}
DeriveExternalTensors();
return kTfLiteOk;
}
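  // Allocates scratch memory for tensors internal to the delegated partition;
  // external tensors keep the buffers provided by the TfLite runtime. All
  // tensors are assumed to have the same element count, which holds because
  // only same-shape ADD/SUB nodes are delegated.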
TfLiteStatus Prepare(TfLiteOpaqueContext* context,
TfLiteOpaqueNode* delegated_node) override {
if (external_tensors_.empty()) return kTfLiteOk;
const int kTheInputTensorSize =
helpers::CalculateNumElements((*external_tensors_.begin()));
for (std::vector<const TfLiteOpaqueTensor*>& vecs : node_input_tensors_) {
for (const TfLiteOpaqueTensor* tensor : vecs) {
if (IsExternalTensor(tensor)) continue;
std::vector<float>& vec_memory = internal_tensors_memory_[tensor];
vec_memory.resize(kTheInputTensorSize);
}
}
for (const TfLiteOpaqueTensor* tensor : node_output_tensors_) {
if (IsExternalTensor(tensor)) continue;
std::vector<float>& vec_memory = internal_tensors_memory_[tensor];
vec_memory.resize(kTheInputTensorSize);
}
return kTfLiteOk;
}
void ComputeImpl(float* input_1, float* input_2, float* output,
int builtin_code, int number_of_elements) {
for (int i = 0; i < number_of_elements; ++i) {
if (builtin_code == kTfLiteBuiltinAdd) {
output[i] = input_1[i] + input_2[i];
} else {
output[i] = input_1[i] - input_2[i];
}
}
}
float* GetRawDataSource(TfLiteOpaqueContext* context,
const TfLiteOpaqueTensor* tensor) {
if (IsExternalTensor(tensor)) {
return reinterpret_cast<float*>(TfLiteOpaqueTensorData(tensor));
} else {
return internal_tensors_memory_[tensor].data();
}
}
TfLiteStatus Eval(TfLiteOpaqueContext* context,
TfLiteOpaqueNode* delegated_node) override {
for (int i = 0; i < node_input_tensors_.size(); ++i) {
float* input1 = GetRawDataSource(context, node_input_tensors_[i][0]);
float* input2 = GetRawDataSource(context, node_input_tensors_[i][1]);
float* output = GetRawDataSource(context, node_output_tensors_[i]);
ComputeImpl(input1, input2, output, builtin_code_[i],
helpers::CalculateNumElements(node_output_tensors_[i]));
}
return kTfLiteOk;
}
private:
std::vector<std::vector<const TfLiteOpaqueTensor*>> node_input_tensors_;
absl::flat_hash_set<const TfLiteOpaqueTensor*> node_input_tensors_set_;
std::vector<const TfLiteOpaqueTensor*> node_output_tensors_;
absl::flat_hash_set<const TfLiteOpaqueTensor*> node_output_tensors_set_;
absl::flat_hash_set<const TfLiteOpaqueTensor*> external_tensors_;
absl::flat_hash_map<const TfLiteOpaqueTensor*, std::vector<float>>
internal_tensors_memory_;
TfLiteOpaqueContext* context_;
std::vector<int> builtin_code_;
};
}
int helpers::CalculateNumElements(const TfLiteOpaqueTensor* opaque_tensor) {
int total_num_elements = 1;
for (int i = 0; i < TfLiteOpaqueTensorNumDims(opaque_tensor); ++i) {
total_num_elements *= TfLiteOpaqueTensorDim(opaque_tensor, i);
}
return total_num_elements;
}
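// Accepts only ADD and SUB nodes with no fused activation, exactly two float32
// inputs, and identical input shapes.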
bool SampleStableDelegate::IsNodeSupportedByDelegate(
const TfLiteOperator* registration_external, const TfLiteOpaqueNode* node,
TfLiteOpaqueContext* context) const {
TfLiteBuiltinOperator builtin_operator =
TfLiteOperatorGetBuiltInCode(registration_external);
void* builtin_data = TfLiteOpaqueNodeGetBuiltinData(node);
if (builtin_operator == kTfLiteBuiltinAdd) {
TfLiteAddParams* params = reinterpret_cast<TfLiteAddParams*>(builtin_data);
if (!params || params->activation != kTfLiteActNone) return false;
} else if (builtin_operator == kTfLiteBuiltinSub) {
TfLiteSubParams* params = reinterpret_cast<TfLiteSubParams*>(builtin_data);
if (!params || params->activation != kTfLiteActNone) return false;
} else {
return false;
}
if (TfLiteOpaqueNodeNumberOfInputs(node) != 2) return false;
const TfLiteOpaqueTensor* tensor_1 =
TfLiteOpaqueNodeGetInput(context, node, 0);
const TfLiteOpaqueTensor* tensor_2 =
TfLiteOpaqueNodeGetInput(context, node, 1);
if (!tensor_1 || TfLiteOpaqueTensorType(tensor_1) != kTfLiteFloat32)
return false;
if (!tensor_2 || TfLiteOpaqueTensorType(tensor_2) != kTfLiteFloat32)
return false;
if (TfLiteOpaqueTensorNumDims(tensor_1) !=
TfLiteOpaqueTensorNumDims(tensor_2))
return false;
for (int i = 0; i < TfLiteOpaqueTensorNumDims(tensor_1); ++i) {
if (TfLiteOpaqueTensorDim(tensor_1, i) !=
TfLiteOpaqueTensorDim(tensor_2, i)) {
return false;
}
}
return true;
}
TfLiteStatus SampleStableDelegate::Initialize(TfLiteOpaqueContext* context) {
return kTfLiteOk;
}
const char* SampleStableDelegate::Name() const {
return kSampleStableDelegateName;
}
std::unique_ptr<SimpleOpaqueDelegateKernelInterface>
SampleStableDelegate::CreateDelegateKernelInterface() {
return std::make_unique<SampleStableDelegateKernel>();
}
}
} | #include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.h"
#include <cstddef>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace {
TEST(SampleStableDelegate, StaticallyLinkedDelegateAndModelWithAdd) {
tflite::TfLiteOpaqueDelegateUniquePtr opaque_delegate =
tflite::TfLiteOpaqueDelegateFactory::Create(
std::make_unique<tflite::example::SampleStableDelegate>());
ASSERT_NE(opaque_delegate, nullptr);
TfLiteModel* model =
TfLiteModelCreateFromFile("tensorflow/lite/testdata/add.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsAddDelegate(options, opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor, nullptr);
const float kTensorCellValue = 3.f;
int64_t n = tflite::NumElements(input_tensor);
std::vector<float> input(n, kTensorCellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < output.size(); ++i) {
EXPECT_EQ(output[i], kTensorCellValue * 3);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
TEST(SampleStableDelegate, StaticallyLinkedDelegateAndModelWithSub) {
tflite::TfLiteOpaqueDelegateUniquePtr opaque_delegate =
tflite::TfLiteOpaqueDelegateFactory::Create(
std::make_unique<tflite::example::SampleStableDelegate>());
ASSERT_NE(opaque_delegate, nullptr);
TfLiteModel* model =
TfLiteModelCreateFromFile("tensorflow/lite/testdata/sub.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsAddDelegate(options, opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor_0 =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor_0, nullptr);
const float kTensor0CellValue = 3.f;
int64_t n = tflite::NumElements(input_tensor_0);
std::vector<float> input_0(n, kTensor0CellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor_0, input_0.data(),
input_0.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor_1 =
TfLiteInterpreterGetInputTensor(interpreter, 1);
ASSERT_NE(input_tensor_1, nullptr);
n = tflite::NumElements(input_tensor_1);
const float kTensor1CellValue = 2.f;
std::vector<float> input_1(n, kTensor1CellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor_1, input_1.data(),
input_1.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < output.size(); ++i) {
EXPECT_EQ(output[i], kTensor0CellValue - kTensor1CellValue);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b74a0183-94c8-4b66-a1f4-6699c8cbb84c | cpp | tensorflow/tensorflow | tensor_pjrt_buffer_util | tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util.cc | tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util_test.cc | #include "tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util.h"
#include <memory>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/pjrt_tensor_buffer_util.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/pjrt_c_api_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/tfrt/common/async_value_tensor.h"
#include "tensorflow/core/tfrt/common/global_state.h"
#include "tensorflow/core/tfrt/common/pjrt_state.h"
#include "tensorflow/core/tfrt/common/pjrt_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
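// Returns the PJRT_Buffer backing the tensor; fails if the tensor is not an
// AsyncValueTensor holding a PjRtCApiBuffer.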
absl::StatusOr<PJRT_Buffer*> GetPjRtCBufferFromTensor(const Tensor* tensor) {
tensorflow::AsyncValueTensor* av_tensor =
tensorflow::AsyncValueTensor::FromTensor(tensor);
if (av_tensor == nullptr || av_tensor->GetBuffer() == nullptr) {
return absl::InternalError("Input tensor does not have PjRtBuffer.");
}
auto* c_api_buffer =
dynamic_cast<xla::PjRtCApiBuffer*>(av_tensor->GetBuffer().get());
if (c_api_buffer == nullptr) {
return absl::InternalError(
"The PjRtBuffer in the tensor is not type PjRtCApiBuffer.");
}
return c_api_buffer->c_buffer();
}
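// Wraps the C buffer in a PjRtCApiBuffer and attaches it to the tensor,
// rebuilding the tensor as a PjRt-backed AsyncValueTensor if necessary.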
absl::Status SetPjRtCBufferToTensor(PJRT_Buffer* c_buffer,
xla::PjRtCApiClient* c_api_client,
Tensor* tensor) {
auto buffer = std::make_unique<xla::PjRtCApiBuffer>(c_api_client, c_buffer);
tensorflow::AsyncValueTensor* av_tensor =
tensorflow::AsyncValueTensor::FromTensor(tensor);
if (av_tensor == nullptr) {
TF_ASSIGN_OR_RETURN(
*tensor, MakeTensorFromPjRtBuffer(tensor->dtype(), tensor->shape(),
std::move(buffer)));
} else {
av_tensor->SetBuffer(std::move(buffer));
}
return absl::OkStatus();
}
absl::StatusOr<xla::PjRtCApiClient*> GetPjRtCApiClient(
const DeviceType& device_type) {
TF_ASSIGN_OR_RETURN(absl::StatusOr<xla::PjRtClient*> pjrt_client,
tensorflow::GetPjRtClient(device_type));
auto* pjrt_c_api_client = dynamic_cast<xla::PjRtCApiClient*>(*pjrt_client);
if (pjrt_c_api_client == nullptr) {
return absl::InternalError(absl::StrCat("PjRtClient for ",
device_type.type_string(),
" is not type PjRtCApiClient"));
}
return pjrt_c_api_client;
}
absl::Status ResetPjRtClient(const DeviceType& device_type) {
ResourceMgr* rmgr = tfrt_global::GetTFGlobalResourceMgr();
PjRtState* pjrt_state;
TF_RETURN_IF_ERROR(rmgr->Lookup(rmgr->default_container(),
kPjRtStateResourceName, &pjrt_state));
TF_RETURN_IF_ERROR(pjrt_state->MovePjRtClientToUnused(device_type));
return absl::OkStatus();
}
} | #include "tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/c/pjrt_c_api_cpu.h"
#include "xla/pjrt/c/pjrt_c_api_wrapper_impl.h"
#include "xla/pjrt/cpu/cpu_client.h"
#include "xla/pjrt/pjrt_api.h"
#include "xla/pjrt/pjrt_c_api_client.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/tfrt/common/async_value_tensor.h"
#include "tensorflow/core/tfrt/common/pjrt_util.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
using ::testing::HasSubstr;
using ::testing::NotNull;
using ::tsl::testing::StatusIs;
PJRT_Buffer* CreateCBuffer() {
auto status = pjrt::PjrtApi(DEVICE_CPU);
if (!status.ok()) {
CHECK_OK(pjrt::SetPjrtApi(DEVICE_CPU, GetPjrtApi()));
}
auto pjrt_client = xla::GetCApiClient(DEVICE_CPU);
CHECK_OK(pjrt_client.status());
auto c_api_client = down_cast<xla::PjRtCApiClient*>(pjrt_client->get());
std::vector<int32_t> data(1, 0);
xla::Shape shape = xla::ShapeUtil::MakeShape(xla::S32, {1});
auto buffer = c_api_client->pjrt_c_client()->client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
xla::PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
c_api_client->pjrt_c_client()->client->addressable_devices()[0]);
CHECK_OK(buffer.status());
return new PJRT_Buffer{std::move(*buffer), c_api_client->pjrt_c_client()};
}
TEST(TensorPjRtBufferUtilTest, GetPjRtCBufferFromTensorNoBuffer) {
auto allocator = std::make_unique<AsyncValueAllocator>();
tensorflow::Tensor tensor(allocator.get(), DT_FLOAT, {1});
EXPECT_THAT(
GetPjRtCBufferFromTensor(&tensor),
StatusIs(error::INTERNAL, HasSubstr(absl::StrCat(
"Input tensor does not have PjRtBuffer"))));
}
TEST(TensorPjRtBufferUtilTest, GetPjRtCBufferFromTensorIncoorectType) {
auto allocator = std::make_unique<AsyncValueAllocator>();
tensorflow::Tensor tensor(allocator.get(), DT_FLOAT, {1});
TF_ASSERT_OK_AND_ASSIGN(
auto pjrt_client,
xla::GetTfrtCpuClient(true, 1));
std::vector<int32_t> data(1, 0);
xla::Shape shape = xla::ShapeUtil::MakeShape(xla::S32, {1});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
pjrt_client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
xla::PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall,
nullptr, pjrt_client->addressable_devices()[0]));
tensorflow::AsyncValueTensor* av_tensor =
tensorflow::AsyncValueTensor::FromTensor(&tensor);
av_tensor->SetBuffer(std::move(buffer));
EXPECT_THAT(
GetPjRtCBufferFromTensor(&tensor),
StatusIs(
error::INTERNAL,
HasSubstr(absl::StrCat(
"The PjRtBuffer in the tensor is not type PjRtCApiBuffer"))));
}
TEST(TensorPjRtBufferUtilTest, GetPjRtCBufferFromTensorSuccess) {
auto allocator = std::make_unique<AsyncValueAllocator>();
tensorflow::Tensor tensor(allocator.get(), DT_FLOAT, {1});
auto status = pjrt::PjrtApi(DEVICE_CPU);
if (!status.ok()) {
TF_ASSERT_OK(pjrt::SetPjrtApi(DEVICE_CPU, GetPjrtApi()));
}
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, xla::GetCApiClient(DEVICE_CPU));
std::vector<int32_t> data(1, 0);
xla::Shape shape = xla::ShapeUtil::MakeShape(xla::S32, {1});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
pjrt_client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
xla::PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall,
nullptr, pjrt_client->addressable_devices()[0]));
tensorflow::AsyncValueTensor* av_tensor =
tensorflow::AsyncValueTensor::FromTensor(&tensor);
av_tensor->SetBuffer(std::move(buffer));
TF_ASSERT_OK_AND_ASSIGN(auto c_buffer, GetPjRtCBufferFromTensor(&tensor));
EXPECT_THAT(c_buffer, NotNull());
}
TEST(TensorPjRtBufferUtilTest, SetPjRtCBufferToTensorNotAsyncValueTensor) {
tensorflow::Tensor tensor(DT_FLOAT, {1});
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, xla::GetCApiClient(DEVICE_CPU));
PJRT_Buffer* c_buffer = CreateCBuffer();
TF_EXPECT_OK(SetPjRtCBufferToTensor(
c_buffer, down_cast<xla::PjRtCApiClient*>(pjrt_client.get()), &tensor));
}
TEST(TensorPjRtBufferUtilTest, SetPjRtCBufferToTensorSuccess) {
auto allocator = std::make_unique<AsyncValueAllocator>();
tensorflow::Tensor tensor(allocator.get(), DT_FLOAT, {1});
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, xla::GetCApiClient(DEVICE_CPU));
PJRT_Buffer* c_buffer = CreateCBuffer();
TF_EXPECT_OK(SetPjRtCBufferToTensor(
c_buffer, down_cast<xla::PjRtCApiClient*>(pjrt_client.get()), &tensor));
}
TEST(TensorPjRtBufferUtilTest, GetPjRtCApiClientNotFound) {
EXPECT_THAT(
GetPjRtCApiClient(tensorflow::DeviceType(DEVICE_CPU)),
StatusIs(error::NOT_FOUND,
HasSubstr(absl::StrCat("PjRt client not found for device type ",
DEVICE_CPU))));
}
TEST(TensorPjRtBufferUtilTest, GetPjRtCApiClientIncorrectType) {
TF_ASSERT_OK_AND_ASSIGN(
auto pjrt_client,
xla::GetTfrtCpuClient(true, 1));
TF_ASSERT_OK(SetPjRtClientInTFGlobalResourceManager(DEVICE_CPU,
std::move(pjrt_client)));
EXPECT_THAT(GetPjRtCApiClient(tensorflow::DeviceType(DEVICE_CPU)),
StatusIs(error::INTERNAL,
HasSubstr(absl::StrCat("PjRtClient for ", DEVICE_CPU,
" is not type PjRtCApiClient"))));
}
TEST(TensorPjRtBufferUtilTest, GetPjRtCApiClientSuccess) {
auto status = pjrt::PjrtApi(DEVICE_CPU);
if (!status.ok()) {
TF_ASSERT_OK(pjrt::SetPjrtApi(DEVICE_CPU, GetPjrtApi()));
}
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, xla::GetCApiClient(DEVICE_CPU));
TF_ASSERT_OK(SetPjRtClientInTFGlobalResourceManager(DEVICE_CPU,
std::move(pjrt_client)));
TF_ASSERT_OK_AND_ASSIGN(
auto pjrt_client_get,
GetPjRtCApiClient(tensorflow::DeviceType(DEVICE_CPU)));
EXPECT_THAT(pjrt_client_get, NotNull());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
31b38434-5c80-41ae-9aa3-f9777e02235e | cpp | google/tensorstore | dimension_selection | tensorstore/index_space/internal/dimension_selection.cc | tensorstore/index_space/dimension_selection_test.cc | #include "tensorstore/index_space/internal/dimension_selection.h"
#include <numeric>
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "tensorstore/index_space/dimension_identifier.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
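// Normalizes every entry of `dimensions` to the range [0, input_rank) and
// reports an error if the selection exceeds the input rank or lists the same
// dimension more than once.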
absl::Status CheckAndNormalizeDimensions(DimensionIndex input_rank,
span<DimensionIndex> dimensions) {
if (dimensions.size() > input_rank) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Number of dimensions (", dimensions.size(),
") exceeds input rank (", input_rank, ")."));
}
std::vector<DimensionIndex> error_dimensions;
for (DimensionIndex i = 0; i < dimensions.size(); ++i) {
TENSORSTORE_ASSIGN_OR_RETURN(
const DimensionIndex dim,
NormalizeDimensionIndex(dimensions[i], input_rank));
dimensions[i] = dim;
for (DimensionIndex j = 0; j < i; ++j) {
if (dimensions[j] == dim) {
error_dimensions.push_back(dim);
}
}
}
if (!error_dimensions.empty()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Input dimensions {", absl::StrJoin(error_dimensions, ", "),
"} specified more than once"));
}
return absl::OkStatus();
}
absl::Status GetDimensions(DimensionIndex input_rank,
span<const DimensionIndex> dimensions,
DimensionIndexBuffer* result) {
result->assign(dimensions.begin(), dimensions.end());
return CheckAndNormalizeDimensions(input_rank, *result);
}
absl::Status GetDimensions(IndexTransformView<> transform,
span<const DimensionIndex> dimensions,
DimensionIndexBuffer* result) {
return GetDimensions(transform.input_rank(), dimensions, result);
}
absl::Status GetDimensions(IndexTransformView<> transform,
span<const DimensionIdentifier> dimensions,
DimensionIndexBuffer* result) {
const DimensionIndex input_rank = transform.input_rank();
result->resize(dimensions.size());
span<const std::string> input_labels = transform.input_labels();
for (DimensionIndex i = 0; i < dimensions.size(); ++i) {
TENSORSTORE_ASSIGN_OR_RETURN(
(*result)[i],
NormalizeDimensionIdentifier(dimensions[i], input_labels));
}
return CheckAndNormalizeDimensions(input_rank, *result);
}
absl::Status GetNewDimensions(DimensionIndex input_rank,
span<const DimensionIndex> dimensions,
DimensionIndexBuffer* result) {
return GetDimensions(input_rank + dimensions.size(), dimensions, result);
}
absl::Status GetAllDimensions(DimensionIndex input_rank,
DimensionIndexBuffer* result) {
result->resize(input_rank);
std::iota(result->begin(), result->end(), static_cast<DimensionIndex>(0));
return absl::OkStatus();
}
absl::Status GetDimensions(span<const std::string> labels,
span<const DynamicDimSpec> dimensions,
DimensionIndexBuffer* result) {
result->clear();
TENSORSTORE_RETURN_IF_ERROR(
NormalizeDynamicDimSpecs(dimensions, labels, result));
return CheckAndNormalizeDimensions(labels.size(), *result);
}
namespace {
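// Returns the number of new dimensions described by a DimRangeSpec, or an
// error if the range cannot be resolved without knowing the existing rank.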
Result<DimensionIndex> GetNumNewDimensions(const DimRangeSpec& spec) {
const DimensionIndex step = spec.step;
if (step == 0) return absl::InvalidArgumentError("step must not be 0");
if (spec.inclusive_start) {
const DimensionIndex inclusive_start = *spec.inclusive_start;
if (spec.exclusive_stop) {
const DimensionIndex exclusive_stop = *spec.exclusive_stop;
if ((exclusive_stop < 0) == (inclusive_start < 0) &&
((step > 0 && exclusive_stop >= inclusive_start) ||
(step < 0 && exclusive_stop <= inclusive_start))) {
return CeilOfRatio(*spec.exclusive_stop - inclusive_start, step);
}
} else if (step > 0) {
if (inclusive_start < 0) {
return CeilOfRatio(-inclusive_start, step);
}
} else {
if (inclusive_start >= 0) {
return CeilOfRatio(inclusive_start + 1, -step);
}
}
} else if (spec.exclusive_stop) {
const DimensionIndex exclusive_stop = *spec.exclusive_stop;
if (step > 0) {
if (exclusive_stop >= 0) {
return CeilOfRatio(exclusive_stop, step);
}
} else {
if (exclusive_stop < 0) {
return CeilOfRatio(-(exclusive_stop + 1), -step);
}
}
}
return absl::InvalidArgumentError(tensorstore::StrCat(
"`", spec, "` is not a valid specification for new dimensions"));
}
}
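// Resolves specifications of new (to-be-added) dimensions against the rank the
// index space will have after those dimensions are added.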
absl::Status GetNewDimensions(DimensionIndex input_rank,
span<const DynamicDimSpec> dimensions,
DimensionIndexBuffer* result) {
DimensionIndex new_rank = input_rank;
for (const auto& spec : dimensions) {
if (auto* r = std::get_if<DimRangeSpec>(&spec)) {
TENSORSTORE_ASSIGN_OR_RETURN(DimensionIndex x, GetNumNewDimensions(*r));
new_rank += x;
} else {
new_rank += 1;
}
}
result->clear();
result->reserve(new_rank);
struct Visitor {
DimensionIndex new_rank;
DimensionIndexBuffer* result;
absl::Status operator()(DimensionIndex i) const {
TENSORSTORE_ASSIGN_OR_RETURN(DimensionIndex index,
NormalizeDimensionIndex(i, new_rank));
result->push_back(index);
return absl::OkStatus();
}
absl::Status operator()(const std::string& label) const {
return absl::InvalidArgumentError(
"New dimensions cannot be specified by label");
}
absl::Status operator()(const DimRangeSpec& s) const {
return NormalizeDimRangeSpec(s, new_rank, result);
}
};
for (const auto& spec : dimensions) {
TENSORSTORE_RETURN_IF_ERROR(std::visit(Visitor{new_rank, result}, spec));
}
return CheckAndNormalizeDimensions(new_rank, *result);
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::DimensionIndexBuffer;
using ::tensorstore::DimRangeSpec;
using ::tensorstore::Dims;
using ::tensorstore::dynamic_rank;
using ::tensorstore::DynamicDims;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::TestDimExpressionError;
TEST(DimsTest, ErrorHandling) {
TestDimExpressionError(
IndexTransformBuilder<2, 0>().Finalize().value(),
Dims(span<const DimensionIndex>({0, 0, 1})).IndexSlice(0),
absl::StatusCode::kInvalidArgument,
"Number of dimensions .* exceeds input rank .*");
TestDimExpressionError(IndexTransformBuilder<2, 0>().Finalize().value(),
Dims(2).Label("b"), absl::StatusCode::kInvalidArgument,
"Dimension index 2 is outside valid range .*");
TestDimExpressionError(IndexTransformBuilder<2, 0>().Finalize().value(),
Dims(1, 1).Label("b", "c"),
absl::StatusCode::kInvalidArgument,
"Input dimensions \\{1\\} specified more than once.*");
}
TEST(DimsTest, SelectUsingLabels) {
TestDimExpression(
IndexTransformBuilder<2, 0>()
.input_labels({"x", "y"})
.Finalize()
.value(),
Dims("x").Label("a"),
{0},
IndexTransformBuilder<2, 2>()
.input_labels({"a", "y"})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder<2, 0>().input_labels({"a", "y"}).Finalize().value(),
{});
TestDimExpressionError(
IndexTransformBuilder<2, 0>().input_labels({"x", "y"}).Finalize().value(),
Dims("a").Label("z"), absl::StatusCode::kInvalidArgument,
"Label \"a\" does not match one of \\{\"x\", \"y\"\\}");
TestDimExpressionError(
IndexTransformBuilder<2, 0>().input_labels({"", ""}).Finalize().value(),
Dims("").Label("z"), absl::StatusCode::kInvalidArgument,
"Dimension cannot be specified by empty label");
TestDimExpression(
IndexTransformBuilder<2, 0>()
.input_labels({"x", "y"})
.Finalize()
.value(),
Dims({"x", -1}).Label("a", "b"),
{0, 1},
IndexTransformBuilder<2, 2>()
.input_labels({"a", "b"})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder<2, 0>().input_labels({"a", "b"}).Finalize().value(),
{});
}
TEST(DynamicDimsTest, Existing) {
const auto original_transform = IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value();
const auto expected_identity_new_transform =
IndexTransformBuilder<4, 4>()
.input_labels({"a1", "b1", "c1", "d1"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform =
IndexTransformBuilder<4, 0>()
.input_labels({"a1", "b1", "c1", "d1"})
.Finalize()
.value();
TestDimExpression(
original_transform,
Dims(DimRangeSpec{1, 4, 2}, 0, "c").Label("b1", "d1", "a1", "c1"),
{1, 3, 0, 2},
expected_identity_new_transform,
expected_new_transform,
{});
TestDimExpression(
original_transform,
DynamicDims({DimRangeSpec{1, 4, 2}, 0, "c"})
.Label("b1", "d1", "a1", "c1"),
{1, 3, 0, 2},
expected_identity_new_transform,
expected_new_transform,
{});
}
TEST(DynamicDimsTest, CombinedNew) {
TestDimExpression(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{1, 4, 2}, 0, -1).AddNew().Label("e", "f", "g", "h"),
{1, 3, 0, 7},
IndexTransformBuilder<dynamic_rank, 4>(8, tensorstore::StaticRank<4>{})
.input_labels({"g", "e", "a", "f", "b", "c", "d", "h"})
.output_single_input_dimension(0, 2)
.output_single_input_dimension(1, 4)
.output_single_input_dimension(2, 5)
.output_single_input_dimension(3, 6)
.Finalize()
.value(),
IndexTransformBuilder<dynamic_rank, 0>(8)
.input_labels({"g", "e", "a", "f", "b", "c", "d", "h"})
.Finalize()
.value(),
{},
false);
}
TEST(DynamicDimsTest, InvalidNewLabel) {
TestDimExpressionError(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{1, 4, 2}, "x").AddNew(),
absl::StatusCode::kInvalidArgument,
"New dimensions cannot be specified by label");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewUnbounded) {
TestDimExpressionError(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{std::nullopt, std::nullopt, 1}).AddNew(),
absl::StatusCode::kInvalidArgument,
"`:` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewMissingStop) {
TestDimExpressionError(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{5, std::nullopt, 1}).AddNew(),
absl::StatusCode::kInvalidArgument,
"`5:` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewNegativeStop) {
TestDimExpressionError(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{std::nullopt, -3, 1}).AddNew(),
absl::StatusCode::kInvalidArgument,
"`:-3` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewNegativeStartNegativeStep) {
TestDimExpressionError(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{-5, std::nullopt, -1}).AddNew(),
absl::StatusCode::kInvalidArgument,
"`-5::-1` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewMissingStart) {
TestDimExpressionError(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{std::nullopt, 5, -1}).AddNew(),
absl::StatusCode::kInvalidArgument,
"`:5:-1` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewInvalidInterval) {
TestDimExpressionError(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{6, 5, 1}).AddNew(), absl::StatusCode::kInvalidArgument,
"`6:5` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewInvalidMixedSigns) {
TestDimExpressionError(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{-1, 4, 1}).AddNew(), absl::StatusCode::kInvalidArgument,
"`-1:4` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewZeroStep) {
TestDimExpressionError(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{1, 4, 0}).AddNew(), absl::StatusCode::kInvalidArgument,
"step must not be 0");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewInvalidIntervalNegativeStep) {
TestDimExpressionError(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{5, 6, -1}).AddNew(), absl::StatusCode::kInvalidArgument,
"`5:6:-1` is not a valid specification for new dimensions");
}
TEST(DimsTest, DimRangeSpecNegativeStep) {
TestDimExpression(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{-4, -7, -2}).AddNew().Label("e", "f"),
{2, 0},
IndexTransformBuilder<dynamic_rank, 4>(6)
.input_labels({"f", "a", "e", "b", "c", "d"})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 3)
.output_single_input_dimension(2, 4)
.output_single_input_dimension(3, 5)
.Finalize()
.value(),
IndexTransformBuilder<dynamic_rank, 0>(6)
.input_labels({"f", "a", "e", "b", "c", "d"})
.Finalize()
.value(),
{},
false);
}
TEST(DimsTest, DimRangeSpecNegativeIndicesNew) {
TestDimExpression(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{-6, -3, 2}).AddNew().Label("e", "f"),
{0, 2},
IndexTransformBuilder<dynamic_rank, 4>(6)
.input_labels({"e", "a", "f", "b", "c", "d"})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 3)
.output_single_input_dimension(2, 4)
.output_single_input_dimension(3, 5)
.Finalize()
.value(),
IndexTransformBuilder<dynamic_rank, 0>(6)
.input_labels({"e", "a", "f", "b", "c", "d"})
.Finalize()
.value(),
{},
false);
}
TEST(DimsTest, DimRangeSpecImplicitStopNew) {
TestDimExpression(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{-3, std::nullopt, 2}).AddNew().Label("e", "f"),
{3, 5},
IndexTransformBuilder<dynamic_rank, 4>(6)
.input_labels({"a", "b", "c", "e", "d", "f"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 2)
.output_single_input_dimension(3, 4)
.Finalize()
.value(),
IndexTransformBuilder<dynamic_rank, 0>(6)
.input_labels({"a", "b", "c", "e", "d", "f"})
.Finalize()
.value(),
{},
false);
}
TEST(DimsTest, DimRangeSpecImplicitStopNegativeStepNew) {
TestDimExpression(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{1, std::nullopt, -1}).AddNew().Label("e", "f"),
{1, 0},
IndexTransformBuilder<dynamic_rank, 4>(6)
.input_labels({"f", "e", "a", "b", "c", "d"})
.output_single_input_dimension(0, 2)
.output_single_input_dimension(1, 3)
.output_single_input_dimension(2, 4)
.output_single_input_dimension(3, 5)
.Finalize()
.value(),
IndexTransformBuilder<dynamic_rank, 0>(6)
.input_labels({"f", "e", "a", "b", "c", "d"})
.Finalize()
.value(),
{},
false);
}
TEST(DimsTest, DimRangeSpecImplicitStartNegativeStepNew) {
TestDimExpression(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{std::nullopt, -4, -2}).AddNew().Label("e", "f"),
{5, 3},
IndexTransformBuilder<dynamic_rank, 4>(6)
.input_labels({"a", "b", "c", "f", "d", "e"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 2)
.output_single_input_dimension(3, 4)
.Finalize()
.value(),
IndexTransformBuilder<dynamic_rank, 0>(6)
.input_labels({"a", "b", "c", "f", "d", "e"})
.Finalize()
.value(),
{},
false);
}
TEST(DimsTest, DimRangeSpecImplicitStartNew) {
TestDimExpression(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{std::nullopt, 3, 2}).AddNew().Label("e", "f"),
{0, 2},
IndexTransformBuilder<dynamic_rank, 4>(6)
.input_labels({"e", "a", "f", "b", "c", "d"})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 3)
.output_single_input_dimension(2, 4)
.output_single_input_dimension(3, 5)
.Finalize()
.value(),
IndexTransformBuilder<dynamic_rank, 0>(6)
.input_labels({"e", "a", "f", "b", "c", "d"})
.Finalize()
.value(),
{},
false);
}
TEST(ResolveTest, Example) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain, IndexDomainBuilder<3>().labels({"x", "y", "z"}).Finalize());
DimensionIndexBuffer buffer;
TENSORSTORE_EXPECT_OK(Dims("x", "z").Resolve(domain, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(0, 2));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/dimension_selection.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/dimension_selection_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
a87ce158-3c36-4244-b5ab-8c4260ede5cd | cpp | google/tensorstore | sender_util | tensorstore/util/execution/sender_util.h | tensorstore/util/execution/sender_util_test.cc | #ifndef TENSORSTORE_UTIL_EXECUTION_SENDER_UTIL_H_
#define TENSORSTORE_UTIL_EXECUTION_SENDER_UTIL_H_
#include <atomic>
#include <iterator>
#include <utility>
#include "tensorstore/util/execution/execution.h"
namespace tensorstore {
template <typename FlowReceiver>
struct FlowSingleReceiver {
FlowReceiver receiver;
template <typename... V>
void set_value(V... v) {
execution::set_starting(receiver, [] {});
execution::set_value(receiver, std::move(v)...);
execution::set_done(receiver);
execution::set_stopping(receiver);
}
template <typename E>
void set_error(E e) {
execution::set_starting(receiver, [] {});
execution::set_error(receiver, std::move(e));
execution::set_stopping(receiver);
}
void set_cancel() {
execution::set_starting(receiver, [] {});
execution::set_done(receiver);
execution::set_stopping(receiver);
}
};
template <typename FlowReceiver>
FlowSingleReceiver(FlowReceiver receiver) -> FlowSingleReceiver<FlowReceiver>;
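// Adapts a single-value sender into a flow sender by wrapping the submitted
// receiver in FlowSingleReceiver.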
template <typename Sender>
struct FlowSingleSender {
Sender sender;
template <typename Receiver>
void submit(Receiver receiver) {
execution::submit(sender,
FlowSingleReceiver<Receiver>{std::move(receiver)});
}
};
template <typename Sender>
FlowSingleSender(Sender sender) -> FlowSingleSender<Sender>;
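// Flow sender that emits every element of `range` via set_value, stopping
// early if the receiver requests cancellation through the set_starting
// callback.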
template <typename Range>
struct RangeFlowSender {
Range range;
template <typename Receiver>
friend void submit(RangeFlowSender& sender, Receiver receiver) {
std::atomic<bool> cancelled{false};
execution::set_starting(receiver, [&cancelled] { cancelled = true; });
using std::begin;
using std::end;
auto it = begin(sender.range);
auto end_it = end(sender.range);
for (; !cancelled && it != end_it; ++it) {
auto&& value = *it;
execution::set_value(receiver, std::forward<decltype(value)>(value));
}
execution::set_done(receiver);
execution::set_stopping(receiver);
}
};
}
#endif | #include "tensorstore/util/execution/sender_util.h"
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/any_sender.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender.h"
#include "tensorstore/util/execution/sender_testutil.h"
namespace {
TEST(FlowSingleSenderTest, SetValue) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::FlowSingleSender<tensorstore::ValueSender<int, std::string>>{
{3, "hello"}},
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_starting", "set_value: 3, hello",
"set_done", "set_stopping"));
}
TEST(FlowSingleSenderTest, AnyFlowSenderSetValue) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnyFlowSender<int, int, std::string>(
tensorstore::FlowSingleSender<
tensorstore::ValueSender<int, std::string>>{{3, "hello"}}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_starting", "set_value: 3, hello",
"set_done", "set_stopping"));
}
TEST(FlowSingleSenderTest, SetError) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::FlowSingleSender<tensorstore::ErrorSender<int>>{{3}},
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_starting", "set_error: 3",
"set_stopping"));
}
TEST(FlowSingleSenderTest, AnyFlowSenderSetError) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnyFlowSender<int>(
tensorstore::FlowSingleSender<tensorstore::ErrorSender<int>>{{3}}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_starting", "set_error: 3",
"set_stopping"));
}
TEST(FlowSingleSenderTest, SetCancel) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::FlowSingleSender<tensorstore::CancelSender>{},
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(
log, ::testing::ElementsAre("set_starting", "set_done", "set_stopping"));
}
TEST(FlowSingleSenderTest, AnyFlowSenderSetCancel) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnyFlowSender<int>(
tensorstore::FlowSingleSender<tensorstore::CancelSender>{}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(
log, ::testing::ElementsAre("set_starting", "set_done", "set_stopping"));
}
TEST(RangeFlowSenderTest, Basic) {
std::vector<int> values{1, 2, 3};
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnyFlowSender<int, int>(
tensorstore::RangeFlowSender<std::vector<int>&>{values}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_starting", "set_value: 1",
"set_value: 2", "set_value: 3",
"set_done", "set_stopping"));
}
TEST(RangeFlowSenderTest, CancelImmediately) {
std::vector<int> values{1, 2, 3};
std::vector<std::string> log;
struct Receiver : public tensorstore::LoggingReceiver {
tensorstore::AnyCancelReceiver cancel;
void set_starting(tensorstore::AnyCancelReceiver cancel) {
this->tensorstore::LoggingReceiver::set_starting({});
cancel();
}
};
tensorstore::execution::submit(
tensorstore::AnyFlowSender<int, int>(
tensorstore::RangeFlowSender<std::vector<int>&>{values}),
Receiver{{&log}});
EXPECT_THAT(
log, ::testing::ElementsAre("set_starting", "set_done", "set_stopping"));
}
TEST(RangeFlowSenderTest, Cancel) {
std::vector<int> values{1, 2, 3};
std::vector<std::string> log;
struct Receiver : public tensorstore::LoggingReceiver {
tensorstore::AnyCancelReceiver cancel;
void set_starting(tensorstore::AnyCancelReceiver cancel) {
this->cancel = std::move(cancel);
this->tensorstore::LoggingReceiver::set_starting({});
}
void set_value(int value) {
this->tensorstore::LoggingReceiver::set_value(value);
if (value == 2) {
this->cancel();
}
}
};
tensorstore::execution::submit(
tensorstore::AnyFlowSender<int, int>(
tensorstore::RangeFlowSender<std::vector<int>&>{values}),
Receiver{{&log}});
EXPECT_THAT(
log, ::testing::ElementsAre("set_starting", "set_value: 1",
"set_value: 2", "set_done", "set_stopping"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/sender_util.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/sender_util_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
09bea60d-dbf0-4bb3-b457-acc4a46d50db | cpp | tensorflow/tensorflow | collective_rma_distributed | tensorflow/core/distributed_runtime/collective_rma_distributed.cc | tensorflow/core/distributed_runtime/collective_rma_distributed_test.cc | #include "tensorflow/core/distributed_runtime/collective_rma_distributed.h"
#include <memory>
#include "absl/status/status.h"
#include "tensorflow/core/common_runtime/base_collective_executor.h"
#include "tensorflow/core/common_runtime/copy_tensor.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/distributed_runtime/call_options.h"
#include "tensorflow/core/distributed_runtime/cancellable_call.h"
#include "tensorflow/core/distributed_runtime/request_id.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/protobuf_internal.h"
#include "tensorflow/core/profiler/lib/scoped_memory_debug_annotation.h"
#include "tensorflow/core/protobuf/transport_options.pb.h"
#include "tensorflow/core/protobuf/worker.pb.h"
namespace tensorflow {
namespace {
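// Cancellable RecvBuf RPC issued to the peer task that owns the requested
// tensor data.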
class RecvBufCall : public CancellableCall {
public:
RecvBufCall(int64_t step_id, const string& peer_device,
const string& peer_task, const string& key, Device* to_device,
DeviceContext* to_device_ctx,
const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,
const DeviceLocality& client_locality,
const DeviceAttributes& server_attributes,
CancellationManager* cancel_mgr, WorkerCacheInterface* wc)
: CancellableCall(cancel_mgr, peer_task, wc) {
req_.set_step_id(step_id);
req_.set_buf_rendezvous_key(key);
*req_.mutable_client_locality() = client_locality;
*req_.mutable_server_locality() = server_attributes.locality();
req_.set_num_bytes(to_tensor->TotalBytes());
req_.set_buf_ptr(reinterpret_cast<int64_t>(DMAHelper::base(to_tensor)));
req_.set_src_device(peer_device);
req_.set_src_incarnation(server_attributes.incarnation());
req_.set_dst_device(to_device->name());
req_.set_request_id(GetUniqueRequestId());
}
~RecvBufCall() override {}
void IssueCall(const StatusCallback& done) override {
wi_->RecvBufAsync(&opts_, &req_, &resp_, done);
}
RecvBufRequest req_;
RecvBufResponse resp_;
};
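// Copies the tensor content chunks carried in RecvBufRespExtra into the
// destination CPU tensor's buffer.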
void PopulateTensorFromExtra(const RecvBufRespExtra& extra,
Tensor* cpu_tensor) {
char* head = reinterpret_cast<char*>(DMAHelper::base(cpu_tensor));
for (const auto& tensor_content_chunk : extra.tensor_content()) {
memcpy(head, std::string(tensor_content_chunk).data(),
tensor_content_chunk.size());
head += tensor_content_chunk.size();
}
}
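// Checks that the payload size in `response` matches the destination tensor
// and copies the data when the response carries transport_options.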
Status PopulateTensorFromResponse(const RecvBufResponse& response,
Tensor* cpu_tensor) {
const bool has_transport_options = response.has_transport_options();
if (!has_transport_options) return absl::OkStatus();
const int64_t total_bytes = cpu_tensor->TotalBytes();
int64_t num_bytes = 0;
RecvBufRespExtra extra;
response.transport_options().UnpackTo(&extra);
for (const auto& chunk : extra.tensor_content()) {
num_bytes += chunk.size();
}
if (num_bytes != total_bytes) {
return errors::Internal("Tensor Size Mismatch: RecvBufResponse returned ",
num_bytes,
" bytes, expected: ", cpu_tensor->TotalBytes());
}
PopulateTensorFromExtra(extra, cpu_tensor);
return absl::OkStatus();
}
}
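// Receives a tensor from a peer device. Local peers are delegated to the base
// class; remote peers are fetched with a RecvBuf RPC, staging through a CPU
// tensor when the destination is an accelerator device.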
void CollectiveRemoteAccessDistributed::RecvFromPeer(
const string& peer_device, const string& peer_task, bool peer_is_local,
const string& key, Device* to_device, DeviceContext* to_device_ctx,
const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,
const DeviceLocality& client_locality, int dev_to_dev_stream_index,
CancellationManager* cancellation_manager, const StatusCallback& done) {
if (peer_is_local) {
CollectiveRemoteAccessLocal::RecvFromPeer(
peer_device, peer_task, peer_is_local, key, to_device, to_device_ctx,
to_alloc_attr, to_tensor, client_locality, dev_to_dev_stream_index,
cancellation_manager, done);
return;
}
struct State {
DeviceAttributes server_attributes;
std::unique_ptr<RecvBufCall> call;
std::unique_ptr<Tensor> cpu_tensor;
};
State* state = new State;
Status s = dev_resolver_->GetDeviceAttributes(peer_device,
&state->server_attributes);
if (!s.ok()) {
delete state;
done(s);
return;
}
Tensor* dst_tensor = nullptr;
Device* cpu_dev = nullptr;
if (to_device->tensorflow_accelerator_device_info()) {
Status status = dev_mgr_->LookupDevice("CPU:0", &cpu_dev);
if (!status.ok()) {
delete state;
      done(status);
return;
}
AllocatorAttributes cpu_attr;
cpu_attr.set_gpu_compatible(true);
tsl::profiler::ScopedMemoryDebugAnnotation op_annotation(
"CollectiveRemoteAccessDistributed::RecvFromPeer"
"::recv_buf_callback",
step_id_, "dynamic", to_tensor->dtype(),
[to_tensor]() { return to_tensor->shape().DebugString(); });
state->cpu_tensor =
std::make_unique<Tensor>(cpu_dev->GetAllocator(cpu_attr),
to_tensor->dtype(), to_tensor->shape());
dst_tensor = state->cpu_tensor.get();
} else {
dst_tensor = to_tensor;
}
auto recv_buf_callback =
[this, state, to_device, to_alloc_attr, to_device_ctx, to_tensor, cpu_dev,
dev_to_dev_stream_index, dst_tensor, done](const Status& s) {
if (s.ok()) {
Status status =
PopulateTensorFromResponse(state->call->resp_, dst_tensor);
if (!status.ok()) {
done(status);
delete state;
return;
}
if (to_device->tensorflow_accelerator_device_info()) {
AllocatorAttributes cpu_attr;
cpu_attr.set_gpu_compatible(true);
CopyTensor::ViaDMA("",
nullptr , to_device_ctx, cpu_dev,
to_device, cpu_attr, to_alloc_attr, dst_tensor,
to_tensor, dev_to_dev_stream_index,
[this, state, done](const Status& s) {
delete state;
work_queue_->Schedule([s, done] { done(s); });
});
return;
}
}
delete state;
done(s);
};
state->call = std::make_unique<RecvBufCall>(
step_id_, peer_device, peer_task, key, to_device, to_device_ctx,
to_alloc_attr, dst_tensor, client_locality, state->server_attributes,
cancellation_manager, worker_cache_);
CancellationToken abortion_token =
abortion_cancel_mgr_.get_cancellation_token();
bool already_aborted = !abortion_cancel_mgr_.RegisterCallback(
abortion_token, [state] { state->call->Cancel(); });
if (already_aborted) {
recv_buf_callback(errors::Cancelled("collective ops already aborted"));
} else {
state->call->Start([this, abortion_token,
done = std::move(recv_buf_callback)](const Status& s) {
abortion_cancel_mgr_.DeregisterCallback(abortion_token);
done(s);
});
}
}
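// Verifies that the peer task is reachable and that its device incarnations
// still match the locally cached attributes, i.e. that it has not restarted.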
void CollectiveRemoteAccessDistributed::CheckPeerHealth(
const string& peer_task, int64_t timeout_in_ms,
const StatusCallback& done) {
if (peer_task == task_name_) {
done(absl::OkStatus());
return;
}
WorkerInterface* wi = worker_cache_->GetOrCreateWorker(peer_task);
if (wi == nullptr) {
done(errors::InvalidArgument(peer_task,
" not found. It's probably invalid. The "
"valid form is /job:xxx/replica:0/task:N"));
return;
}
auto opts = new CallOptions();
opts->SetTimeout(timeout_in_ms);
auto req = new GetStatusRequest();
auto resp = new GetStatusResponse();
wi->GetStatusAsync(
opts, req, resp, true,
[this, opts, req, resp, wi, peer_task, done](Status s) {
std::vector<DeviceAttributes> cached_attrs;
if (s.ok()) {
s = dev_resolver_->GetAllDeviceAttributes(peer_task, &cached_attrs);
}
if (s.ok()) {
absl::flat_hash_set<uint64> remote_incarnations;
for (const DeviceAttributes& da : resp->device_attributes()) {
remote_incarnations.insert(da.incarnation());
}
for (const DeviceAttributes& attr : cached_attrs) {
if (!remote_incarnations.contains(attr.incarnation())) {
s = errors::FailedPrecondition(
attr.name(), " with incarnation ", attr.incarnation(),
" is not available. This usually means ", peer_task,
" has restarted");
break;
}
}
} else if (absl::IsNotFound(s)) {
s = absl::OkStatus();
}
delete opts;
delete req;
delete resp;
worker_cache_->ReleaseWorker(peer_task, wi);
done(s);
});
}
void CollectiveRemoteAccessDistributed::StartAbort(const Status& s) {
CollectiveRemoteAccessLocal::StartAbort(s);
abortion_cancel_mgr_.StartCancel();
}
} | #include "tensorflow/core/distributed_runtime/collective_rma_distributed.h"
#include <memory>
#include "google/protobuf/any.pb.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/test_utils.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/transport_options.pb.h"
#include "tensorflow/core/protobuf/worker.pb.h"
namespace tensorflow {
namespace {
class FakeAllocator : public Allocator {
public:
string Name() override { return "fake"; }
void* AllocateRaw(size_t alignment, size_t num_bytes) override {
return port::AlignedMalloc(num_bytes, alignment);
}
void DeallocateRaw(void* ptr) override { return port::AlignedFree(ptr); }
};
static std::unique_ptr<Device> NewDevice(const string& type, const string& name,
Allocator* allocator) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr, Allocator* allocator)
: Device(nullptr, attr), allocator_(allocator) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return allocator_; }
private:
Allocator* const allocator_;
};
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
attr.mutable_locality()->set_numa_node(3);
attr.set_incarnation(random::New64());
return std::make_unique<FakeDevice>(attr, allocator);
}
static int64_t kStepId = 123;
class FakeWorker : public TestWorkerInterface {
public:
FakeWorker(const string& name, DeviceMgr* dev_mgr,
DeviceResolverDistributed* dres, bool is_failed,
bool set_tensor_in_extra)
: name_(name),
device_mgr_(dev_mgr),
device_resolver_(dres),
buf_rendezvous_(kStepId, dev_mgr),
is_failed_(is_failed),
set_tensor_in_extra_(set_tensor_in_extra) {}
BufRendezvous* buf_rendezvous() { return &buf_rendezvous_; }
void GetStatusAsync(CallOptions* opts, const GetStatusRequest* request,
GetStatusResponse* response, bool fail_fast,
StatusCallback done) override {
if (is_failed_) {
done(errors::Unavailable("peer down"));
return;
}
std::vector<DeviceAttributes> dev_attr;
device_mgr_->ListDeviceAttributes(&dev_attr);
for (const auto& da : dev_attr) {
*response->add_device_attributes() = da;
}
done(absl::OkStatus());
}
void RecvBufAsync(CallOptions* opts, const RecvBufRequest* request,
RecvBufResponse* response, StatusCallback done) override {
if (is_failed_) {
done(errors::Unavailable("peer down"));
return;
}
opts->SetCancelCallback([this]() {
SchedClosure([this]() {
Env::Default()->SleepForMicroseconds(100);
buf_rendezvous_.StartAbort(errors::Internal("Cancelled"));
});
});
VLOG(2) << "ConsumeBuf key=" << request->buf_rendezvous_key()
<< " src_device=" << request->src_device()
<< " src_incarnation=" << request->src_incarnation();
buf_rendezvous_.ConsumeBuf(
request->buf_rendezvous_key(), request->src_device(),
request->src_incarnation(),
[this, opts, request, response, done](const Status& status,
BufRendezvous::Hook* h) {
Status s = status;
if (s.ok()) {
opts->ClearCancelCallback();
int64_t num_bytes = h->prod_value->TotalBytes();
if (set_tensor_in_extra_) {
RecvBufRespExtra extra;
extra.add_tensor_content(string(
reinterpret_cast<const char*>(DMAHelper::base(h->prod_value)),
num_bytes));
response->mutable_transport_options()->PackFrom(extra);
} else {
if (request->num_bytes() != num_bytes) {
s = errors::Internal("Tensor Size Mismatch.");
} else {
memcpy(reinterpret_cast<void*>(request->buf_ptr()),
DMAHelper::base(h->prod_value), num_bytes);
}
}
}
done(s);
if (h) BufRendezvous::DoneWithHook(h);
},
nullptr );
}
private:
string name_;
DeviceMgr* device_mgr_;
DeviceResolverDistributed* device_resolver_;
BufRendezvous buf_rendezvous_;
bool is_failed_;
const bool set_tensor_in_extra_;
};
class FakeCache : public TestWorkerCache {
public:
bool GetDeviceLocalityNonBlocking(const string& device,
DeviceLocality* locality) override {
return false;
}
void GetDeviceLocalityAsync(const string& device, DeviceLocality* locality,
StatusCallback done) override {
string task_name;
string dev_part;
if (!DeviceNameUtils::SplitDeviceName(device, &task_name, &dev_part)) {
done(errors::Internal("failed to parse device name"));
return;
}
auto it = workers_.find(task_name);
if (it == workers_.end()) {
done(errors::Internal("failed to find worker ", task_name));
return;
}
WorkerInterface* wi = it->second;
GetStatusRequest req;
GetStatusResponse resp;
Status status = wi->GetStatus(&req, &resp);
if (!status.ok()) {
done(status);
return;
}
for (const auto& it : resp.device_attributes()) {
if (it.name() == device) {
*locality = it.locality();
done(absl::OkStatus());
return;
}
}
done(errors::Internal("device not found: ", device));
}
};
enum TEST_PARAM_DEVICE_TYPE {
TEST_PARAM_DEVICE_TYPE_CPU = 0,
TEST_PARAM_DEVICE_TYPE_GPU,
};
enum TEST_PARAM_TENSOR_LOC {
TEST_PARAM_TENSOR_LOC_AT_BUF_PTR = 0,
TEST_PARAM_TENSOR_LOC_IN_EXTRA,
};
class CollRMADistTest
: public ::testing::TestWithParam<
std::tuple<TEST_PARAM_DEVICE_TYPE, TEST_PARAM_TENSOR_LOC>> {
protected:
CollRMADistTest()
: work_queue_(
std::make_shared<UnboundedWorkQueue>(Env::Default(), "test")) {}
~CollRMADistTest() override {
for (DeviceMgr* dm : device_mgrs_) {
delete dm;
}
for (auto it : dev_resolvers_) {
delete it.second;
}
for (FakeWorker* w : workers_) {
delete w;
}
}
void SetUp() override {
const int num_workers = 2;
const int num_devices = 1;
string device_type = "CPU";
string dev0_worker_name;
for (int w = 0; w < num_workers; ++w) {
string name = strings::StrCat("/job:worker/replica:0/task:", w);
if (w == 0) {
dev0_worker_name = name;
}
DefineWorker(name, device_type, num_devices);
}
rma_ = std::make_unique<CollectiveRemoteAccessDistributed>(
device_mgrs_[0], dev_resolvers_[dev0_worker_name], work_queue_, &wc_,
kStepId, "/job:worker/replica:0/task:0");
const int kNumElts = 8;
expected_value_ = Tensor(DT_FLOAT, {kNumElts});
to_tensor_ = Tensor(DT_FLOAT, {kNumElts});
large_response_ = Tensor(DT_FLOAT, {2 * kNumElts});
auto exp_alias = expected_value_.flat<float>();
auto to_alias = to_tensor_.flat<float>();
auto large_response_alias = large_response_.flat<float>();
for (int i = 0; i < kNumElts; ++i) {
exp_alias(i) = i;
to_alias(i) = -1;
}
for (int i = 0; i < 2 * kNumElts; ++i) {
large_response_alias(i) = -2;
}
}
void ResolveDeviceAttributes() {
for (auto& dev_resolver_item : dev_resolvers_) {
DeviceResolverDistributed* dev_resolver = dev_resolver_item.second;
for (const auto& item : dev_by_task_) {
TF_CHECK_OK(dev_resolver->UpdateDeviceAttributes(item.second));
}
}
}
void DefineWorker(const string& worker_name, const string& device_type,
int num_devices, bool is_failed = false) {
std::vector<std::unique_ptr<Device>> devices;
for (int i = 0; i < num_devices; ++i) {
devices.push_back(NewDevice(
device_type,
strings::StrCat(worker_name, "/device:", device_type, ":", i),
&fake_allocator_));
}
DeviceMgr* dev_mgr = new StaticDeviceMgr(std::move(devices));
device_mgrs_.push_back(dev_mgr);
std::vector<DeviceAttributes>* dv = &dev_by_task_[worker_name];
dv->clear();
for (auto d : dev_mgr->ListDevices()) {
dv->push_back(d->attributes());
}
DeviceResolverDistributed* dev_res = new DeviceResolverDistributed(dev_mgr);
dev_resolvers_[worker_name] = dev_res;
FakeWorker* fw =
new FakeWorker(worker_name, dev_mgr, dev_res, is_failed,
std::get<TEST_PARAM_TENSOR_LOC>(GetParam()) ==
TEST_PARAM_TENSOR_LOC_IN_EXTRA);
workers_.push_back(fw);
wc_.AddWorker(worker_name, fw);
}
void RestartWorker(const string& worker_name, const string& device_type,
int num_devices, bool is_failed = false) {
auto it = dev_resolvers_.find(worker_name);
if (it != dev_resolvers_.end()) {
delete it->second;
dev_resolvers_.erase(it);
}
DefineWorker(worker_name, device_type, num_devices, is_failed);
}
void ValidateResultTensor() {
ASSERT_EQ(expected_value_.NumElements(), to_tensor_.NumElements());
for (int i = 0; i < to_tensor_.NumElements(); ++i) {
EXPECT_FLOAT_EQ(expected_value_.flat<float>()(i),
to_tensor_.flat<float>()(i));
}
}
void ValidateResultTensorUnchanged() {
for (int i = 0; i < to_tensor_.NumElements(); ++i) {
EXPECT_FLOAT_EQ(-1, to_tensor_.flat<float>()(i));
}
}
void MaybeSetGPUDevice(Device* dst_device) {
if (std::get<TEST_PARAM_DEVICE_TYPE>(GetParam()) ==
TEST_PARAM_DEVICE_TYPE_GPU) {
dst_device->set_tensorflow_accelerator_device_info(
&accelerator_device_info_);
}
}
FakeCache wc_;
CancellationManager cm_;
std::vector<DeviceMgr*> device_mgrs_;
std::unordered_map<string, DeviceResolverDistributed*> dev_resolvers_;
std::unordered_map<string, std::vector<DeviceAttributes>> dev_by_task_;
std::shared_ptr<UnboundedWorkQueue> work_queue_;
std::vector<FakeWorker*> workers_;
std::unique_ptr<CollectiveRemoteAccessDistributed> rma_;
mutex mu_;
int num_done_ TF_GUARDED_BY(mu_);
condition_variable done_;
CallOptions opts_;
DeviceLocality device_locality_;
AllocatorAttributes alloc_attr_;
FakeAllocator fake_allocator_;
DeviceBase::AcceleratorDeviceInfo accelerator_device_info_;
Tensor expected_value_;
Tensor large_response_;
Tensor to_tensor_;
};
TEST_P(CollRMADistTest, ProdFirstOK) {
ResolveDeviceAttributes();
Notification consumer_note;
Notification producer_note;
Status consumer_status;
Status producer_status;
FakeWorker* wi = workers_[1];
const string kBufKey = "fake_buf_key";
wi->buf_rendezvous()->ProvideBuf(
kBufKey, nullptr , nullptr , &expected_value_,
AllocatorAttributes(),
[&producer_note, &producer_status](const Status& s) {
producer_status.Update(s);
producer_note.Notify();
},
nullptr );
Device* dst_device = nullptr;
string dev_name = "CPU:0";
TF_EXPECT_OK(device_mgrs_[0]->LookupDevice(dev_name, &dst_device));
DeviceContext* to_device_ctx = nullptr;
MaybeSetGPUDevice(dst_device);
rma_->RecvFromPeer(
"/job:worker/replica:0/task:1/device:" + dev_name,
"/job:worker/replica:0/task:1",
false,
kBufKey, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
device_locality_, 0 ,
nullptr ,
[&consumer_status, &consumer_note](const Status& s) {
consumer_status = s;
consumer_note.Notify();
});
consumer_note.WaitForNotification();
TF_EXPECT_OK(consumer_status);
producer_note.WaitForNotification();
TF_EXPECT_OK(producer_status);
ValidateResultTensor();
}
TEST_P(CollRMADistTest, ConsFirstOK) {
ResolveDeviceAttributes();
Notification consumer_note;
Notification producer_note;
Status consumer_status;
Status producer_status;
FakeWorker* wi = workers_[1];
const string kBufKey = "fake_buf_key";
Device* dst_device = nullptr;
string dev_name = "CPU:0";
TF_EXPECT_OK(device_mgrs_[0]->LookupDevice(dev_name, &dst_device));
MaybeSetGPUDevice(dst_device);
DeviceContext* to_device_ctx = nullptr;
rma_->RecvFromPeer(
"/job:worker/replica:0/task:1/device:" + dev_name,
"/job:worker/replica:0/task:1",
false,
kBufKey, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
device_locality_, 0 ,
nullptr ,
[&consumer_status, &consumer_note](const Status& s) {
consumer_status = s;
consumer_note.Notify();
});
wi->buf_rendezvous()->ProvideBuf(
kBufKey, nullptr , nullptr , &expected_value_,
AllocatorAttributes(),
[&producer_note, &producer_status](const Status& s) {
producer_status.Update(s);
producer_note.Notify();
},
nullptr );
consumer_note.WaitForNotification();
TF_EXPECT_OK(consumer_status);
producer_note.WaitForNotification();
TF_EXPECT_OK(producer_status);
ValidateResultTensor();
}
TEST_P(CollRMADistTest, ConsFirstAbort) {
ResolveDeviceAttributes();
Notification consumer_note;
Status consumer_status;
const string kBufKey = "fake_buf_key";
Device* dst_device = nullptr;
string dev_name = "CPU:0";
TF_EXPECT_OK(device_mgrs_[0]->LookupDevice(dev_name, &dst_device));
MaybeSetGPUDevice(dst_device);
DeviceContext* to_device_ctx = nullptr;
rma_->RecvFromPeer(
"/job:worker/replica:0/task:1/device:" + dev_name,
"/job:worker/replica:0/task:1",
false,
kBufKey, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
device_locality_, 0 ,
nullptr ,
[&consumer_status, &consumer_note](const Status& s) {
consumer_status = s;
consumer_note.Notify();
});
rma_->StartAbort(errors::Internal("Deliberate Failure"));
consumer_note.WaitForNotification();
EXPECT_EQ(consumer_status.message(), "Cancelled");
}
TEST_P(CollRMADistTest, ResponseTooLarge) {
ResolveDeviceAttributes();
Notification consumer_note;
Notification producer_note;
Status consumer_status;
Status producer_status;
FakeWorker* wi = workers_[1];
const string kBufKey = "fake_buf_key";
wi->buf_rendezvous()->ProvideBuf(
kBufKey, nullptr , nullptr , &large_response_,
AllocatorAttributes(),
[&producer_note, &producer_status](const Status& s) {
producer_status.Update(s);
producer_note.Notify();
},
nullptr );
Device* dst_device = nullptr;
string dev_name = "CPU:0";
TF_EXPECT_OK(device_mgrs_[0]->LookupDevice(dev_name, &dst_device));
DeviceContext* to_device_ctx = nullptr;
MaybeSetGPUDevice(dst_device);
rma_->RecvFromPeer(
"/job:worker/replica:0/task:1/device:" + dev_name,
"/job:worker/replica:0/task:1",
false,
kBufKey, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
device_locality_, 0 ,
nullptr ,
[&consumer_status, &consumer_note](const Status& s) {
consumer_status = s;
consumer_note.Notify();
});
consumer_note.WaitForNotification();
EXPECT_THAT(consumer_status.message(),
::testing::HasSubstr("Tensor Size Mismatch"));
producer_note.WaitForNotification();
TF_EXPECT_OK(producer_status);
ValidateResultTensorUnchanged();
}
TEST_P(CollRMADistTest, WorkerRestart) {
ResolveDeviceAttributes();
Notification consumer_note;
Notification producer_note;
Status consumer_status;
Status producer_status;
FakeWorker* wi = workers_[1];
const string buf_key = "fake_buf_key";
Device* dst_device = nullptr;
string dev_name = "CPU:0";
TF_EXPECT_OK(device_mgrs_[0]->LookupDevice(dev_name, &dst_device));
MaybeSetGPUDevice(dst_device);
DeviceContext* to_device_ctx = nullptr;
rma_->RecvFromPeer(
"/job:worker/replica:0/task:1/device:" + dev_name,
"/job:worker/replica:0/task:1",
false,
buf_key, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
device_locality_, 0 ,
nullptr ,
[&consumer_status, &consumer_note](const Status& s) {
consumer_status = s;
consumer_note.Notify();
});
wi->buf_rendezvous()->ProvideBuf(
buf_key, nullptr , nullptr , &expected_value_,
AllocatorAttributes(),
[&producer_note, &producer_status](const Status& s) {
producer_status.Update(s);
producer_note.Notify();
},
nullptr );
consumer_note.WaitForNotification();
TF_EXPECT_OK(consumer_status);
producer_note.WaitForNotification();
TF_EXPECT_OK(producer_status);
ValidateResultTensor();
RestartWorker("/job:worker/replica:0/task:1", "CPU", 1);
Notification post_restart_note;
rma_->RecvFromPeer(
"/job:worker/replica:0/task:1/device:" + dev_name,
"/job:worker/replica:0/task:1",
false,
buf_key, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
device_locality_, 0 ,
nullptr ,
[&consumer_status, &post_restart_note](const Status& s) {
consumer_status = s;
post_restart_note.Notify();
});
post_restart_note.WaitForNotification();
EXPECT_TRUE(errors::IsFailedPrecondition(consumer_status));
}
TEST_P(CollRMADistTest, CheckHealthOKWithCachedAttr) {
ResolveDeviceAttributes();
Status check_health_status;
Notification check_health_done;
rma_->CheckPeerHealth(
"/job:worker/replica:0/task:1", 0,
[&check_health_status, &check_health_done](const Status s) {
check_health_status = s;
check_health_done.Notify();
});
check_health_done.WaitForNotification();
TF_EXPECT_OK(check_health_status);
}
TEST_P(CollRMADistTest, CheckHealthOKWithoutCachedAttr) {
Status check_health_status;
Notification check_health_done;
rma_->CheckPeerHealth(
"/job:worker/replica:0/task:1", 0,
[&check_health_status, &check_health_done](const Status s) {
check_health_status = s;
check_health_done.Notify();
});
check_health_done.WaitForNotification();
EXPECT_TRUE(check_health_status.ok());
}
TEST_P(CollRMADistTest, CheckHealthRestarted) {
ResolveDeviceAttributes();
RestartWorker("/job:worker/replica:0/task:1", "CPU", 1);
Status check_health_status;
Notification check_health_done;
rma_->CheckPeerHealth(
"/job:worker/replica:0/task:1", 0,
[&check_health_status, &check_health_done](const Status s) {
check_health_status = s;
check_health_done.Notify();
});
check_health_done.WaitForNotification();
EXPECT_TRUE(errors::IsFailedPrecondition(check_health_status));
}
TEST_P(CollRMADistTest, CheckHealthFailedPeer) {
ResolveDeviceAttributes();
RestartWorker("/job:worker/replica:0/task:1", "CPU", 1,
true);
Status check_health_status;
Notification check_health_done;
rma_->CheckPeerHealth(
"/job:worker/replica:0/task:1", 0,
[&check_health_status, &check_health_done](const Status s) {
check_health_status = s;
check_health_done.Notify();
});
check_health_done.WaitForNotification();
EXPECT_TRUE(errors::IsUnavailable(check_health_status));
}
TEST_P(CollRMADistTest, CheckHealthRestartedWithDifferentDevices) {
ResolveDeviceAttributes();
RestartWorker("/job:worker/replica:0/task:1", "GPU", 1);
Status check_health_status;
Notification check_health_done;
rma_->CheckPeerHealth(
"/job:worker/replica:0/task:1", 0,
[&check_health_status, &check_health_done](const Status s) {
check_health_status = s;
check_health_done.Notify();
});
check_health_done.WaitForNotification();
EXPECT_TRUE(errors::IsFailedPrecondition(check_health_status));
}
INSTANTIATE_TEST_SUITE_P(
TensorInBufPtrOrExtra, CollRMADistTest,
::testing::Combine(::testing::Values(TEST_PARAM_TENSOR_LOC_AT_BUF_PTR,
TEST_PARAM_TENSOR_LOC_IN_EXTRA),
::testing::Values(TEST_PARAM_DEVICE_TYPE_CPU,
TEST_PARAM_DEVICE_TYPE_GPU)));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/collective_rma_distributed.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/collective_rma_distributed_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
94825b0c-0378-4fc1-b42d-9a415dd00c59 | cpp | tensorflow/tensorflow | hlo_parser | third_party/xla/xla/hlo/parser/hlo_parser.cc | third_party/xla/xla/hlo/parser/hlo_parser_test.cc | #include "xla/hlo/parser/hlo_parser.h"
#include <cmath>
#include <complex>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/casts.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "xla/array.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_domain_metadata.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_original_value.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/hlo_sharding_metadata.h"
#include "xla/hlo/ir/tile_assignment.h"
#include "xla/hlo/parser/hlo_lexer.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/computation_layout.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/name_uniquer.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/tsl/lib/gtl/map_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
using absl::StrAppend;
using absl::StrCat;
using absl::StrFormat;
using absl::StrJoin;
using std::nullopt;
using std::optional;
const int8_t kDebugLevel = 10;
const int8_t kErrorLevel = 1;
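// Builds an HloSchedule that simply follows the order in which instructions
// appear in each non-fusion computation of the module.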
HloSchedule ScheduleFromInstructionOrder(HloModule* module) {
HloSchedule schedule(module);
for (HloComputation* computation : module->computations()) {
if (!computation->IsFusionComputation()) {
for (HloInstruction* instruction : computation->instructions()) {
schedule.GetOrCreateSequence(computation).push_back(instruction);
}
}
}
return schedule;
}
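// Returns true if the result shape of the given opcode can be inferred from
// its operands, so the shape may be omitted in the HLO text.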
bool CanInferShape(HloOpcode code) {
switch (code) {
case HloOpcode::kAbs:
case HloOpcode::kAdd:
case HloOpcode::kAddDependency:
case HloOpcode::kAfterAll:
case HloOpcode::kAtan2:
case HloOpcode::kBatchNormGrad:
case HloOpcode::kBatchNormInference:
case HloOpcode::kBatchNormTraining:
case HloOpcode::kBroadcast:
case HloOpcode::kCall:
case HloOpcode::kCeil:
case HloOpcode::kCholesky:
case HloOpcode::kClamp:
case HloOpcode::kClz:
case HloOpcode::kCompare:
case HloOpcode::kComplex:
case HloOpcode::kConcatenate:
case HloOpcode::kConditional:
case HloOpcode::kConvolution:
case HloOpcode::kCopy:
case HloOpcode::kCos:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kDivide:
case HloOpcode::kDomain:
case HloOpcode::kDot:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kFft:
case HloOpcode::kFloor:
case HloOpcode::kGather:
case HloOpcode::kGetDimensionSize:
case HloOpcode::kSetDimensionSize:
case HloOpcode::kGetTupleElement:
case HloOpcode::kImag:
case HloOpcode::kIsFinite:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kLogistic:
case HloOpcode::kAnd:
case HloOpcode::kNot:
case HloOpcode::kOr:
case HloOpcode::kXor:
case HloOpcode::kMap:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kMultiply:
case HloOpcode::kNegate:
case HloOpcode::kPad:
case HloOpcode::kPartitionId:
case HloOpcode::kPopulationCount:
case HloOpcode::kPower:
case HloOpcode::kReal:
case HloOpcode::kReduce:
case HloOpcode::kRemainder:
case HloOpcode::kReplicaId:
case HloOpcode::kReverse:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kRsqrt:
case HloOpcode::kScatter:
case HloOpcode::kSelect:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kShiftRightLogical:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kSqrt:
case HloOpcode::kCbrt:
case HloOpcode::kReduceWindow:
case HloOpcode::kSelectAndScatter:
case HloOpcode::kSort:
case HloOpcode::kSubtract:
case HloOpcode::kTan:
case HloOpcode::kTanh:
case HloOpcode::kTranspose:
case HloOpcode::kTriangularSolve:
case HloOpcode::kTuple:
case HloOpcode::kWhile:
case HloOpcode::kTopK:
return true;
case HloOpcode::kAsyncStart:
case HloOpcode::kAsyncUpdate:
case HloOpcode::kAsyncDone:
case HloOpcode::kAllGather:
case HloOpcode::kAllGatherStart:
case HloOpcode::kAllGatherDone:
case HloOpcode::kAllReduce:
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllReduceDone:
case HloOpcode::kAllToAll:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kCopyDone:
case HloOpcode::kCopyStart:
case HloOpcode::kDynamicReshape:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kRecv:
case HloOpcode::kRecvDone:
case HloOpcode::kReduceScatter:
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kSlice:
case HloOpcode::kBitcast:
case HloOpcode::kBitcastConvert:
case HloOpcode::kConstant:
case HloOpcode::kConvert:
case HloOpcode::kCustomCall:
case HloOpcode::kFusion:
case HloOpcode::kInfeed:
case HloOpcode::kIota:
case HloOpcode::kOutfeed:
case HloOpcode::kParameter:
case HloOpcode::kReducePrecision:
case HloOpcode::kReshape:
case HloOpcode::kRng:
case HloOpcode::kRngBitGenerator:
case HloOpcode::kRngGetAndUpdateState:
case HloOpcode::kStochasticConvert:
return false;
}
}
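// Implementation of the HLO text parser. Consumes tokens from HloLexer and
// builds HloModule / HloComputation / HloInstruction objects, reporting
// errors with source locations on failure.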
class HloParserImpl : public HloParser {
public:
using LocTy = HloLexer::LocTy;
using BoolList = absl::InlinedVector<bool, 1>;
explicit HloParserImpl(absl::string_view str,
const HloParserOptions& options = HloParserOptions())
: lexer_(str), options_(options) {}
absl::Status Run(HloModule* module) override;
std::string GetError() const { return StrJoin(error_, "\n"); }
absl::StatusOr<Shape> ParseShapeOnly();
absl::StatusOr<Layout> ParseLayoutOnly();
absl::StatusOr<HloSharding> ParseShardingOnly();
absl::StatusOr<FrontendAttributes> ParseFrontendAttributesOnly();
absl::StatusOr<StatisticsViz> ParseStatisticsVizOnly();
absl::StatusOr<std::vector<bool>> ParseParameterReplicationOnly();
absl::StatusOr<BoolList> ParseBooleanListOrSingleBooleanOnly();
absl::StatusOr<Window> ParseWindowOnly();
absl::StatusOr<ConvolutionDimensionNumbers>
ParseConvolutionDimensionNumbersOnly();
absl::StatusOr<PaddingConfig> ParsePaddingConfigOnly();
absl::StatusOr<std::vector<ReplicaGroup>> ParseReplicaGroupsOnly();
private:
enum class AttrTy {
kBool,
kInt64,
kInt32,
kFloat,
kString,
kLiteral,
kBracedInt64List,
kBracedInt64ListList,
kHloComputation,
kBracedHloComputationList,
kFftType,
kPaddingType,
kComparisonDirection,
kComparisonType,
kWindow,
kConvolutionDimensionNumbers,
kSharding,
kFrontendAttributes,
kStatisticsViz,
kBracedBoolListOrBool,
kParameterReplication,
kInstructionList,
kSliceRanges,
kPaddingConfig,
kMetadata,
kFusionKind,
kDistribution,
kDomain,
kPrecisionList,
kShape,
kShapeList,
kEnum,
kRandomAlgorithm,
kPrecisionAlgorithm,
kAliasing,
kBufferDonor,
kComputationLayout,
kInstructionAliasing,
kCustomCallSchedule,
kCustomCallApiVersion,
kSparsityDescriptor,
kStringOrJsonDict,
kCollectiveDeviceList,
kOriginalValue,
};
struct AttrConfig {
bool required;
AttrTy attr_type;
void* result;
};
using InstrNameTable =
absl::flat_hash_map<std::string, std::pair<HloInstruction*, LocTy>>;
InstrNameTable& current_name_table() { return scoped_name_tables_.back(); }
std::pair<HloInstruction*, LocTy>* FindInstruction(
const std::string& name, const optional<Shape>& shape = nullopt);
bool ParseSingleInstruction(HloModule* module);
bool ParseHloModule(HloModule* module,
bool parse_module_without_header = false);
bool ParseComputations(HloModule* module);
bool ParseComputation(HloComputation** entry_computation);
bool ParseInstructionList(HloComputation** computation,
const std::string& computation_name);
bool ParseInstruction(HloComputation::Builder* builder,
std::string* root_name);
bool ParseInstructionRhs(HloComputation::Builder* builder, std::string name,
LocTy name_loc, bool allow_attributes = true);
bool ParseControlPredecessors(HloInstruction* instruction);
bool ParseLiteral(Literal* literal);
bool ParseLiteral(Literal* literal, const Shape& shape);
bool ParseTupleLiteral(Literal* literal, const Shape& shape);
bool ParseNonTupleLiteral(Literal* literal, const Shape& shape);
bool ParseDenseLiteral(Literal* literal, const Shape& shape);
HloInstruction* CreateInstruction(
HloComputation::Builder* builder, absl::string_view name,
std::optional<Shape> shape, HloOpcode opcode,
std::optional<HloOpcode> async_wrapped_opcode,
absl::flat_hash_map<std::string, AttrConfig>& attrs,
bool allow_attributes,
std::vector<HloInstruction*>* preset_operands = nullptr);
bool SetValueInLiteral(LocTy loc, int64_t value, int64_t index,
Literal* literal);
bool SetValueInLiteral(LocTy loc, double value, int64_t index,
Literal* literal);
bool SetValueInLiteral(LocTy loc, bool value, int64_t index,
Literal* literal);
bool SetValueInLiteral(LocTy loc, std::complex<double> value, int64_t index,
Literal* literal);
template <typename LiteralNativeT, typename ParsedElemT>
bool SetValueInLiteralHelper(LocTy loc, ParsedElemT value, int64_t index,
Literal* literal);
template <typename LiteralNativeT, typename ParsedElemT>
bool CheckParsedValueIsInRange(LocTy loc, ParsedElemT value);
template <typename LiteralNativeT>
bool CheckParsedValueIsInRange(LocTy loc, std::complex<double> value);
bool ParseOperands(std::vector<HloInstruction*>* operands,
HloComputation::Builder* builder);
bool ParseOperands(std::vector<HloInstruction*>* operands,
HloComputation::Builder* builder, int expected_size);
struct SliceRanges {
std::vector<int64_t> starts;
std::vector<int64_t> limits;
std::vector<int64_t> strides;
};
struct DomainData {
std::unique_ptr<DomainMetadata> entry_metadata;
std::unique_ptr<DomainMetadata> exit_metadata;
};
bool ParseAttributes(
const absl::flat_hash_map<std::string, AttrConfig>& attrs,
bool allow_attributes = true, const std::optional<Shape>& shape = {});
bool ParseSubAttributes(
const absl::flat_hash_map<std::string, AttrConfig>& attrs);
bool ParseAttributeHelper(
const absl::flat_hash_map<std::string, AttrConfig>& attrs,
absl::flat_hash_set<std::string>* seen_attrs,
const std::optional<Shape>& shape = {});
bool CopyAttributeToProtoMessage(
absl::flat_hash_set<std::string> non_proto_attrs,
const absl::flat_hash_map<std::string, AttrConfig>& attrs,
tsl::protobuf::Message* message);
bool ParseAttributesAsProtoMessage(
const absl::flat_hash_map<std::string, AttrConfig>& non_proto_attrs,
tsl::protobuf::Message* message);
bool ParseComputationName(HloComputation** value);
bool ParseInstructionNames(std::vector<HloInstruction*>* instructions);
bool ParseWindow(Window* window, bool expect_outer_curlies);
bool ParseConvolutionDimensionNumbers(ConvolutionDimensionNumbers* dnums);
bool ParsePaddingConfig(PaddingConfig* padding);
bool ParseMetadata(OpMetadata& metadata);
bool ParseSingleOrListMetadata(std::vector<OpMetadata>& metadata);
bool ParseOpShardingType(OpSharding::Type* type);
bool ParseListShardingType(std::vector<OpSharding::Type>* types);
bool ParseSharding(std::optional<HloSharding>& sharding);
bool ParseCollectiveDeviceList(CollectiveDeviceList* device_list);
bool ParseFrontendAttributes(FrontendAttributes* frontend_attributes);
bool ParseStatisticsViz(StatisticsViz* statistics_viz);
bool ParseTileAssignment(std::vector<int64_t>& tile_assignment_dimensions,
std::vector<int64_t>& iota_reshape_dims,
std::vector<int>& iota_transpose_perm,
std::vector<int64_t>* devices);
bool ParseSingleSharding(std::optional<HloSharding>& sharding,
bool lbrace_pre_lexed);
bool ParseParameterReplication(ParameterReplication* parameter_replication);
bool ParseBooleanListOrSingleBoolean(BoolList* boolean_list);
bool ParseReplicaGroupsOnly(std::vector<ReplicaGroup>* replica_groups);
bool ParseDomain(DomainData* domain);
bool ParseDxD(const std::string& name, std::vector<int64_t>* result);
bool ParseWindowPad(std::vector<std::vector<int64_t>>* pad);
bool ParseSliceRanges(SliceRanges* result);
bool ParsePrecisionList(std::vector<PrecisionConfig::Precision>* result);
bool ParseHloComputation(HloComputation** result);
bool ParseHloComputationList(std::vector<HloComputation*>* result);
bool ParseShapeList(std::vector<Shape>* result);
bool ParseInt64List(TokKind start, TokKind end, TokKind delim,
std::vector<int64_t>* result);
bool ParseInt64ListList(TokKind start, TokKind end, TokKind delim,
std::vector<std::vector<int64_t>>* result);
bool ParseList(TokKind start, TokKind end, TokKind delim,
absl::FunctionRef<bool()> parse_and_add_item);
bool ParseParamListToShape(Shape* shape, LocTy* shape_loc);
bool ParseParamList();
bool ParseName(std::string* result);
bool ParseAttributeName(std::string* result);
bool ParseString(std::string* result);
bool ParseJsonDict(std::string* result);
bool ParseDimensionSizes(std::vector<int64_t>* dimension_sizes,
std::vector<bool>* dynamic_dimensions);
bool ParseShape(Shape* result);
bool ParseLayout(Layout* layout);
bool ParseLayoutIntAttribute(int64_t* attr_value,
absl::string_view attr_description);
bool ParseDimLevelTypes(
absl::InlinedVector<DimLevelType, InlineRank()>* dim_level_types,
absl::InlinedVector<bool, InlineRank()>* dim_unique,
absl::InlinedVector<bool, InlineRank()>* dim_ordered);
bool ParseTiles(std::vector<Tile>* tiles);
bool ParseSplitConfigs(std::vector<SplitConfig>& split_configs);
bool ParsePhysicalShape(Shape* physical_shape);
bool ParseOpcode(HloOpcode* opcode,
std::optional<HloOpcode>* async_wrapped_opcode);
bool ParseFftType(FftType* result);
bool ParsePaddingType(PaddingType* result);
bool ParsePrimitiveType(PrimitiveType* result);
bool ParseComparisonDirection(ComparisonDirection* result);
bool ParseComparisonType(Comparison::Type* result);
bool ParseFusionKind(HloInstruction::FusionKind* result);
bool ParseRandomDistribution(RandomDistribution* result);
bool ParseRandomAlgorithm(RandomAlgorithm* result);
bool ParsePrecision(PrecisionConfig::Precision* result);
bool ParseAlgorithm(PrecisionConfig::Algorithm* result);
bool ParseInt64(int64_t* result);
bool ParseDouble(double* result);
bool ParseComplex(std::complex<double>* result);
bool ParseBool(bool* result);
bool ParseToken(TokKind kind, const std::string& msg);
bool ParseUnsignedIntegerType(PrimitiveType* primitive_type);
bool ParseOriginalValue(
optional<std::shared_ptr<OriginalValue>>* original_value,
const Shape& shape);
using AliasingData =
absl::flat_hash_map<ShapeIndex, HloInputOutputAliasConfig::Alias>;
using BufferDonor = absl::flat_hash_set<HloBufferDonorConfig::BufferDonor>;
bool ParseAliasing(AliasingData* data);
bool ParseBufferDonor(BufferDonor* data);
bool ParseComputationLayout(ComputationLayout* computation_layout);
bool ParseInstructionOutputOperandAliasing(
std::vector<std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>*
aliasing_output_operand_pairs);
bool ParseCustomCallSchedule(CustomCallSchedule* result);
bool ParseCustomCallApiVersion(CustomCallApiVersion* result);
bool ParseSparsityDescriptor(std::vector<SparsityDescriptor>* result);
bool ParseShapeIndex(ShapeIndex* out);
bool CanBeShape();
bool CanBeParamListToShape();
bool TokenError(absl::string_view msg);
bool Error(LocTy loc, absl::string_view msg);
bool EatIfPresent(TokKind kind);
bool AddInstruction(const std::string& name, HloInstruction* instruction,
LocTy name_loc);
bool AddComputation(const std::string& name, HloComputation* computation,
LocTy name_loc);
HloLexer lexer_;
std::vector<InstrNameTable> scoped_name_tables_;
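  // RAII helper that pushes a fresh instruction-name table on construction
  // and pops it on destruction, giving nested instruction lists their own
  // name scope.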
class Scope {
public:
explicit Scope(std::vector<InstrNameTable>* scoped_name_tables)
: scoped_name_tables_(scoped_name_tables) {
scoped_name_tables_->emplace_back();
}
~Scope() { scoped_name_tables_->pop_back(); }
private:
std::vector<InstrNameTable>* scoped_name_tables_;
};
absl::flat_hash_map<std::string, std::pair<HloComputation*, LocTy>>
computation_pool_;
std::vector<std::unique_ptr<HloComputation>> computations_;
std::vector<std::string> error_;
std::function<std::pair<HloInstruction*, LocTy>*(const std::string& name,
const Shape& shape)>
create_missing_instruction_;
NameUniquer name_uniquer_{"."};
const HloParserOptions options_;
};
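// Splits `s` on `delim` and parses every piece as an int64_t; returns false
// if any piece fails to parse.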
bool SplitToInt64s(absl::string_view s, char delim, std::vector<int64_t>* out) {
for (const auto& split : absl::StrSplit(s, delim)) {
int64_t val;
if (!absl::SimpleAtoi(split, &val)) {
return false;
}
out->push_back(val);
}
return true;
}
std::vector<ReplicaGroup> CreateReplicaGroups(
absl::Span<const std::vector<int64_t>> groups) {
std::vector<ReplicaGroup> replica_groups;
absl::c_transform(groups, std::back_inserter(replica_groups),
[](const std::vector<int64_t>& ids) {
ReplicaGroup group;
*group.mutable_replica_ids() = {ids.begin(), ids.end()};
return group;
});
return replica_groups;
}
bool HloParserImpl::Error(LocTy loc, absl::string_view msg) {
auto line_col = lexer_.GetLineAndColumn(loc);
const unsigned line = line_col.first;
const unsigned col = line_col.second;
std::vector<std::string> error_lines;
error_lines.push_back(
StrCat("was parsing ", line, ":", col, ": error: ", msg));
error_lines.emplace_back(lexer_.GetLine(loc));
error_lines.push_back(col == 0 ? "" : StrCat(std::string(col - 1, ' '), "^"));
error_.push_back(StrJoin(error_lines, "\n"));
VLOG(kErrorLevel) << "Error: " << error_.back();
return false;
}
bool HloParserImpl::TokenError(absl::string_view msg) {
return Error(lexer_.GetLoc(), msg);
}
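// Entry point: parses either a full HloModule (with or without the
// "HloModule" header) or, failing that, a single HloInstruction.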
absl::Status HloParserImpl::Run(HloModule* module) {
lexer_.Lex();
if ((lexer_.GetKind() == TokKind::kw_HloModule) ||
(lexer_.GetKind() == TokKind::kw_ENTRY) ||
(lexer_.LookAhead() == TokKind::kLbrace)) {
    bool parse_module_without_header =
        lexer_.GetKind() != TokKind::kw_HloModule;
if (!ParseHloModule(module, parse_module_without_header)) {
return InvalidArgument(
"Syntax error when trying to parse the text as a HloModule:\n%s",
GetError());
}
return absl::OkStatus();
}
if (!ParseSingleInstruction(module)) {
return InvalidArgument(
"Syntax error when trying to parse the text as a single "
"HloInstruction:\n%s",
GetError());
}
return absl::OkStatus();
}
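// Looks up an instruction by name in the innermost scope. If it is not found
// and create_missing_instruction_ is set (single-instruction parsing), the
// callback creates the missing operand instead; when a declared shape is
// given, it is checked for compatibility with the found instruction.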
std::pair<HloInstruction*, HloParserImpl::LocTy>*
HloParserImpl::FindInstruction(const std::string& name,
const optional<Shape>& shape) {
std::pair<HloInstruction*, LocTy>* instr = nullptr;
if (!name.empty()) {
instr = tsl::gtl::FindOrNull(current_name_table(), name);
}
if (instr == nullptr && create_missing_instruction_ != nullptr &&
scoped_name_tables_.size() == 1) {
if (!shape.has_value()) {
Error(lexer_.GetLoc(),
"Operand had no shape in HLO text; cannot create parameter for "
"single-instruction module.");
return nullptr;
}
return create_missing_instruction_(name, *shape);
}
if (instr != nullptr && shape.has_value() &&
!ShapeUtil::Compatible(instr->first->shape(), shape.value())) {
Error(
lexer_.GetLoc(),
StrCat("The declared operand shape ",
ShapeUtil::HumanStringWithLayout(shape.value()),
" is not compatible with the shape of the operand instruction ",
ShapeUtil::HumanStringWithLayout(instr->first->shape()), "."));
return nullptr;
}
return instr;
}
bool HloParserImpl::ParseShapeIndex(ShapeIndex* out) {
if (!ParseToken(TokKind::kLbrace, "Expects '{' at the start of ShapeIndex")) {
return false;
}
std::vector<int64_t> idxs;
while (lexer_.GetKind() != TokKind::kRbrace) {
int64_t idx;
if (!ParseInt64(&idx)) {
return false;
}
idxs.push_back(idx);
if (!EatIfPresent(TokKind::kComma)) {
break;
}
}
if (!ParseToken(TokKind::kRbrace, "Expects '}' at the end of ShapeIndex")) {
return false;
}
*out = ShapeIndex(idxs.begin(), idxs.end());
return true;
}
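// Parses the input_output_alias module attribute, e.g.
//   { {0}: (0, {1}, may-alias), ... }
// mapping output shape indices to (parameter, parameter index, alias kind).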
bool HloParserImpl::ParseAliasing(AliasingData* data) {
if (!ParseToken(TokKind::kLbrace,
"Expects '{' at the start of aliasing description")) {
return false;
}
while (lexer_.GetKind() != TokKind::kRbrace) {
ShapeIndex out;
if (!ParseShapeIndex(&out)) {
return false;
}
std::string errmsg =
"Expected format: <output_shape_index>: (<input_param>, "
"<input_param_shape_index>) OR <output_shape_index>: <input_param>";
if (!ParseToken(TokKind::kColon, errmsg)) {
return false;
}
if (!ParseToken(TokKind::kLparen, errmsg)) {
return false;
}
    int64_t param_num;
    if (!ParseInt64(&param_num)) {
      return false;
    }
if (!ParseToken(TokKind::kComma, errmsg)) {
return false;
}
ShapeIndex param_idx;
if (!ParseShapeIndex(¶m_idx)) {
return false;
}
HloInputOutputAliasConfig::AliasKind alias_kind =
HloInputOutputAliasConfig::kMayAlias;
if (EatIfPresent(TokKind::kComma)) {
std::string type;
ParseName(&type);
if (type == "must-alias") {
alias_kind = HloInputOutputAliasConfig::kMustAlias;
} else if (type == "may-alias") {
alias_kind = HloInputOutputAliasConfig::kMayAlias;
} else {
return TokenError("Unexpected aliasing kind; expected SYSTEM or USER");
}
}
data->emplace(std::piecewise_construct, std::forward_as_tuple(out),
std::forward_as_tuple(param_num, param_idx, alias_kind));
if (!ParseToken(TokKind::kRparen, errmsg)) {
return false;
}
if (!EatIfPresent(TokKind::kComma)) {
break;
}
}
if (!ParseToken(TokKind::kRbrace,
"Expects '}' at the end of aliasing description")) {
return false;
}
return true;
}
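// Parses the buffer_donor module attribute: a brace-enclosed list of
// (parameter_number, parameter_shape_index) pairs.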
bool HloParserImpl::ParseBufferDonor(BufferDonor* data) {
if (!ParseToken(TokKind::kLbrace,
"Expects '{' at the start of buffer donor description")) {
return false;
}
std::string errmsg =
"Expected format: (<input_param>, <input_param_shape_index>)";
while (lexer_.GetKind() != TokKind::kRbrace) {
if (!ParseToken(TokKind::kLparen, errmsg)) {
return false;
}
    int64_t param_num;
    if (!ParseInt64(&param_num)) {
      return false;
    }
if (!ParseToken(TokKind::kComma, errmsg)) {
return false;
}
ShapeIndex param_idx;
if (!ParseShapeIndex(¶m_idx)) {
return false;
}
if (!ParseToken(TokKind::kRparen, errmsg)) {
return false;
}
data->emplace(param_num, param_idx);
if (!EatIfPresent(TokKind::kComma)) {
break;
}
}
if (!ParseToken(TokKind::kRbrace,
"Expects '}' at the end of buffer donor description")) {
return false;
}
return true;
}
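// Parses an entry computation layout of the form
//   {(param_shape, ...) -> result_shape}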
bool HloParserImpl::ParseComputationLayout(
ComputationLayout* computation_layout) {
if (!ParseToken(TokKind::kLbrace,
"Expects '{' at the start of aliasing description")) {
return false;
}
if (!ParseToken(TokKind::kLparen, "Expects ( before parameter shape list")) {
return false;
}
while (lexer_.GetKind() != TokKind::kRparen) {
Shape param;
if (!ParseShape(¶m)) {
return false;
}
computation_layout->add_parameter_layout(ShapeLayout(param));
if (lexer_.GetKind() == TokKind::kRparen) {
break;
}
if (!ParseToken(TokKind::kComma, "Expects , between parameter shapes")) {
return false;
}
}
if (!ParseToken(TokKind::kRparen,
"Expects ) at end of parameter shape list")) {
return false;
}
if (!ParseToken(TokKind::kArrow, "Expects -> before result shape")) {
return false;
}
Shape result;
if (!ParseShape(&result)) {
return false;
}
*computation_layout->mutable_result_layout() = ShapeLayout(result);
if (!ParseToken(TokKind::kRbrace,
"Expects '}' at the end of computation layouts")) {
return false;
}
return true;
}
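// Parses output-to-operand aliasing for an instruction: a list of
// <output_shape_index>: (<operand_index>, <operand_shape_index>) entries.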
bool HloParserImpl::ParseInstructionOutputOperandAliasing(
std::vector<std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>*
aliasing_output_operand_pairs) {
if (!ParseToken(
TokKind::kLbrace,
"Expects '{' at the start of instruction aliasing description")) {
return false;
}
while (lexer_.GetKind() != TokKind::kRbrace) {
ShapeIndex out;
if (!ParseShapeIndex(&out)) {
return false;
}
std::string errmsg =
"Expected format: <output_shape_index>: (<operand_index>, "
"<operand_shape_index>)";
if (!ParseToken(TokKind::kColon, errmsg)) {
return false;
}
if (!ParseToken(TokKind::kLparen, errmsg)) {
return false;
}
    int64_t operand_index;
    if (!ParseInt64(&operand_index)) {
      return false;
    }
if (!ParseToken(TokKind::kComma, errmsg)) {
return false;
}
ShapeIndex operand_shape_index;
if (!ParseShapeIndex(&operand_shape_index)) {
return false;
}
aliasing_output_operand_pairs->emplace_back(
out,
std::pair<int64_t, ShapeIndex>{operand_index, operand_shape_index});
if (!ParseToken(TokKind::kRparen, errmsg)) {
return false;
}
if (!EatIfPresent(TokKind::kComma)) {
break;
}
}
if (!ParseToken(
TokKind::kRbrace,
"Expects '}' at the end of instruction aliasing description")) {
return false;
}
return true;
}
bool HloParserImpl::ParseCustomCallSchedule(CustomCallSchedule* result) {
VLOG(kDebugLevel) << "ParseCustomCallSchedule";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects custom-call schedule");
}
std::string val = lexer_.GetStrVal();
auto status_or_result = StringToCustomCallSchedule(val);
if (!status_or_result.ok()) {
return TokenError(
StrFormat("expects custom-call schedule but sees: %s, error: %s", val,
status_or_result.status().message()));
}
*result = status_or_result.value();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseCustomCallApiVersion(CustomCallApiVersion* result) {
VLOG(kDebugLevel) << "ParseCustomCallApiVersion";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects custom-call API version");
}
std::string val = lexer_.GetStrVal();
auto status_or_result = StringToCustomCallApiVersion(val);
if (!status_or_result.ok()) {
return TokenError(
StrFormat("expects custom-call API version but sees: %s, error: %s",
val, status_or_result.status().message()));
}
*result = status_or_result.value();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseSparsityDescriptor(
std::vector<SparsityDescriptor>* result) {
VLOG(kDebugLevel) << "ParseSparsityDescriptor";
if (lexer_.GetKind() != TokKind::kSparsityDesc) {
return TokenError("expects sparsity descriptor, e.g. L.0@2:4");
}
std::string val = lexer_.GetStrVal();
std::vector<absl::string_view> split = absl::StrSplit(val, '_');
for (absl::string_view item : split) {
std::vector<absl::string_view> splitA = absl::StrSplit(item, '@');
std::vector<absl::string_view> splitB = absl::StrSplit(splitA[0], '.');
std::vector<absl::string_view> splitC = absl::StrSplit(splitA[1], ':');
SparsityDescriptor descriptor;
int dim, n, m;
if (!absl::SimpleAtoi(splitB[1], &dim) || dim < 0) {
return TokenError("Invalid dimension number");
}
if (!absl::SimpleAtoi(splitC[0], &n) || !absl::SimpleAtoi(splitC[1], &m) ||
n < 1 || m <= n) {
return TokenError("Invalid structured sparsity type");
}
descriptor.set_type(SparsityType::SPARSITY_STRUCTURED_N_M);
descriptor.set_index(splitB[0] == "L" ? 0 : 1);
descriptor.set_dimension(dim);
descriptor.set_n(n);
descriptor.set_m(m);
result->push_back(descriptor);
}
lexer_.Lex();
return true;
}
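// Parses a whole HloModule: the optional "HloModule <name> <attrs>" header
// followed by its computations, then applies module-level attributes such as
// schedule, replica/partition counts, entry computation layout, aliasing, and
// buffer donors to the module and its config.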
bool HloParserImpl::ParseHloModule(HloModule* module,
bool parse_module_without_header) {
std::string name;
std::optional<bool> is_scheduled;
std::optional<int64_t> replica_count;
std::optional<int64_t> num_partitions;
std::optional<AliasingData> aliasing_data;
std::optional<BufferDonor> buffer_donor_data;
std::optional<bool> alias_passthrough_params;
absl::flat_hash_map<std::string, AttrConfig> attrs;
std::optional<ComputationLayout> entry_computation_layout;
std::optional<FrontendAttributes> frontend_attributes;
BoolList allow_spmd_sharding_propagation_to_parameters;
BoolList allow_spmd_sharding_propagation_to_output;
attrs["is_scheduled"] = {false, AttrTy::kBool, &is_scheduled};
attrs["replica_count"] = {false, AttrTy::kInt64, &replica_count};
attrs["num_partitions"] = {false, AttrTy::kInt64,
&num_partitions};
attrs["input_output_alias"] = {false, AttrTy::kAliasing,
&aliasing_data};
attrs["buffer_donor"] = {false, AttrTy::kBufferDonor,
&buffer_donor_data};
attrs["alias_passthrough_params"] = {false, AttrTy::kBool,
&alias_passthrough_params};
attrs["entry_computation_layout"] = {false,
AttrTy::kComputationLayout,
&entry_computation_layout};
attrs["frontend_attributes"] = {
false, AttrTy::kFrontendAttributes, &frontend_attributes};
attrs["allow_spmd_sharding_propagation_to_parameters"] = {
false, AttrTy::kBracedBoolListOrBool,
&allow_spmd_sharding_propagation_to_parameters};
attrs["allow_spmd_sharding_propagation_to_output"] = {
false, AttrTy::kBracedBoolListOrBool,
&allow_spmd_sharding_propagation_to_output};
if (!parse_module_without_header) {
if (lexer_.GetKind() != TokKind::kw_HloModule) {
return TokenError("expects HloModule");
}
lexer_.Lex();
if (!ParseName(&name)) {
return false;
}
if (!ParseAttributes(attrs)) {
return false;
}
module->set_name(name);
}
if (!ParseComputations(module)) {
return false;
}
if (parse_module_without_header) {
name = absl::StrCat("module_", module->entry_computation()->name());
}
module->set_name(name);
if (is_scheduled.value_or(false)) {
TF_CHECK_OK(module->set_schedule(ScheduleFromInstructionOrder(module)));
}
HloModuleConfig config = module->config();
bool default_config = true;
if (alias_passthrough_params.value_or(false)) {
config.set_alias_passthrough_params(true);
default_config = false;
}
if (num_partitions.value_or(1) != 1) {
config.set_num_partitions(*num_partitions);
config.set_use_spmd_partitioning(true);
default_config = false;
}
if (replica_count.value_or(1) != 1) {
config.set_replica_count(*replica_count);
default_config = false;
}
if (entry_computation_layout.has_value()) {
*config.mutable_entry_computation_layout() = *entry_computation_layout;
default_config = false;
} else {
HloComputation* entry_computation = module->entry_computation();
for (int64_t p = 0; p < entry_computation->num_parameters(); p++) {
const Shape& param_shape =
entry_computation->parameter_instruction(p)->shape();
TF_CHECK_OK(module->mutable_entry_computation_layout()
->mutable_parameter_layout(p)
->CopyLayoutFromShape(param_shape));
}
const Shape& result_shape = entry_computation->root_instruction()->shape();
TF_CHECK_OK(module->mutable_entry_computation_layout()
->mutable_result_layout()
->CopyLayoutFromShape(result_shape));
}
if (frontend_attributes) {
module->set_frontend_attributes(frontend_attributes.value());
}
if (!allow_spmd_sharding_propagation_to_parameters.empty()) {
config.set_allow_spmd_sharding_propagation_to_parameters(
allow_spmd_sharding_propagation_to_parameters);
default_config = false;
}
if (!allow_spmd_sharding_propagation_to_output.empty()) {
config.set_allow_spmd_sharding_propagation_to_output(
allow_spmd_sharding_propagation_to_output);
default_config = false;
}
if (!default_config) {
module->set_config(config);
}
if (aliasing_data) {
HloInputOutputAliasConfig alias_config(module->result_shape());
for (auto& p : *aliasing_data) {
absl::Status st =
alias_config.SetUpAlias(p.first, p.second.parameter_number,
p.second.parameter_index, p.second.kind);
if (!st.ok()) {
return TokenError(st.message());
}
}
module->input_output_alias_config() = alias_config;
}
if (buffer_donor_data) {
HloBufferDonorConfig buffer_donor_config;
for (auto& p : *buffer_donor_data) {
absl::Status st =
buffer_donor_config.AddBufferDonor(p.param_number, p.param_index);
if (!st.ok()) {
return TokenError(st.message());
}
}
module->buffer_donor_config() = buffer_donor_config;
}
return true;
}
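// Parses all computations in the module and registers the ENTRY computation
// (or, if none is marked ENTRY, the last computation parsed) as the entry.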
bool HloParserImpl::ParseComputations(HloModule* module) {
HloComputation* entry_computation = nullptr;
do {
if (!ParseComputation(&entry_computation)) {
return false;
}
} while (lexer_.GetKind() != TokKind::kEof);
for (int i = 0; i < computations_.size(); i++) {
if ((entry_computation != nullptr &&
computations_[i].get() != entry_computation) ||
(entry_computation == nullptr && i != computations_.size() - 1)) {
module->AddEmbeddedComputation(std::move(computations_[i]));
continue;
}
module->AddEntryComputation(std::move(computations_[i]));
}
return true;
}
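// Parses one computation: [ENTRY] <name> [(params) -> shape] { ... },
// followed by optional attributes such as execution_thread.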
bool HloParserImpl::ParseComputation(HloComputation** entry_computation) {
LocTy maybe_entry_loc = lexer_.GetLoc();
const bool is_entry_computation = EatIfPresent(TokKind::kw_ENTRY);
std::string name;
LocTy name_loc = lexer_.GetLoc();
if (!ParseName(&name)) {
return false;
}
LocTy shape_loc = nullptr;
Shape shape;
if (CanBeParamListToShape() && !ParseParamListToShape(&shape, &shape_loc)) {
return false;
}
HloComputation* computation = nullptr;
if (!ParseInstructionList(&computation, name)) {
return false;
}
if (shape_loc != nullptr &&
!ShapeUtil::Compatible(computation->root_instruction()->shape(), shape)) {
return Error(
shape_loc,
StrCat(
"Shape of computation ", name, ", ", ShapeUtil::HumanString(shape),
", is not compatible with that of its root instruction ",
computation->root_instruction()->name(), ", ",
ShapeUtil::HumanString(computation->root_instruction()->shape())));
}
absl::flat_hash_map<std::string, AttrConfig> attrs;
optional<std::string> execution_thread = HloInstruction::kMainExecutionThread;
attrs["execution_thread"] = {false, AttrTy::kString,
&execution_thread};
if (!ParseAttributes(attrs)) {
return false;
}
computation->SetExecutionThread(*execution_thread);
if (is_entry_computation) {
if (*entry_computation != nullptr) {
return Error(maybe_entry_loc, "expects only one ENTRY");
}
*entry_computation = computation;
}
return AddComputation(name, computation, name_loc);
}
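// Parses a brace-enclosed instruction list into a new HloComputation, using
// the instruction marked ROOT (defaulting to the last instruction added) as
// the computation root.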
bool HloParserImpl::ParseInstructionList(HloComputation** computation,
const std::string& computation_name) {
Scope scope(&scoped_name_tables_);
HloComputation::Builder builder(computation_name);
if (!ParseToken(TokKind::kLbrace,
"expects '{' at the beginning of instruction list.")) {
return false;
}
std::string root_name;
do {
if (!ParseInstruction(&builder, &root_name)) {
return false;
}
} while (lexer_.GetKind() != TokKind::kRbrace);
if (!ParseToken(TokKind::kRbrace,
"expects '}' at the end of instruction list.")) {
return false;
}
HloInstruction* root = nullptr;
if (!root_name.empty()) {
std::pair<HloInstruction*, LocTy>* root_node =
tsl::gtl::FindOrNull(current_name_table(), root_name);
if (root_node == nullptr) {
LOG(FATAL) << "instruction " << root_name
<< " was marked as ROOT but the parser has not seen it before";
}
root = root_node->first;
}
computations_.emplace_back(builder.Build(root));
*computation = computations_.back().get();
return true;
}
bool HloParserImpl::ParseInstruction(HloComputation::Builder* builder,
std::string* root_name) {
std::string name;
LocTy maybe_root_loc = lexer_.GetLoc();
bool is_root = EatIfPresent(TokKind::kw_ROOT);
const LocTy name_loc = lexer_.GetLoc();
if (!ParseName(&name) ||
!ParseToken(TokKind::kEqual, "expects '=' in instruction")) {
return false;
}
if (is_root) {
if (!root_name->empty()) {
return Error(maybe_root_loc, "one computation should have only one ROOT");
}
*root_name = name;
}
return ParseInstructionRhs(builder, name, name_loc);
}
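// Parses everything to the right of "<name> =": the optional shape, the
// opcode, operands, and attributes (sharding, metadata, control-predecessors,
// backend_config, etc.), then registers the resulting instruction under the
// given name.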
bool HloParserImpl::ParseInstructionRhs(HloComputation::Builder* builder,
std::string name, LocTy name_loc,
bool allow_attributes) {
Shape shape;
HloOpcode opcode;
std::optional<HloOpcode> async_wrapped_opcode;
std::vector<HloInstruction*> operands;
const bool parse_shape = CanBeShape();
if ((parse_shape && !ParseShape(&shape)) ||
!ParseOpcode(&opcode, &async_wrapped_opcode)) {
return false;
}
if (!parse_shape && !CanInferShape(opcode)) {
return TokenError(StrFormat("cannot infer shape for opcode: %s",
HloOpcodeString(opcode)));
}
absl::flat_hash_map<std::string, AttrConfig> attrs;
optional<HloSharding> sharding;
optional<FrontendAttributes> frontend_attributes;
optional<StatisticsViz> statistics_viz;
attrs["sharding"] = {false, AttrTy::kSharding, &sharding};
attrs["frontend_attributes"] = {
false, AttrTy::kFrontendAttributes, &frontend_attributes};
attrs["statistics"] = {false, AttrTy::kStatisticsViz,
&statistics_viz};
optional<ParameterReplication> parameter_replication;
attrs["parameter_replication"] = {false,
AttrTy::kParameterReplication,
¶meter_replication};
optional<std::vector<HloInstruction*>> predecessors;
attrs["control-predecessors"] = {false, AttrTy::kInstructionList,
&predecessors};
optional<std::shared_ptr<OriginalValue>> original_value;
attrs["origin"] = {false, AttrTy::kOriginalValue,
&original_value};
optional<OpMetadata> metadata;
attrs["metadata"] = {false, AttrTy::kMetadata, &metadata};
optional<std::string> backend_config;
attrs["backend_config"] = {false, AttrTy::kStringOrJsonDict,
&backend_config};
std::optional<Shape> maybe_shape;
if (parse_shape) {
maybe_shape = shape;
}
HloInstruction* instruction =
CreateInstruction(builder, name, maybe_shape, opcode,
async_wrapped_opcode, attrs, allow_attributes);
if (instruction == nullptr) {
return false;
}
if (name.empty()) {
name = name_uniquer_.GetUniqueName(
absl::StrCat(HloOpcodeString(instruction->opcode()), ".anon"));
} else {
name_uniquer_.GetUniqueName(name);
}
instruction->SetAndSanitizeName(name);
if (instruction->name() != name) {
return Error(name_loc,
StrCat("illegal instruction name: ", name,
"; suggest renaming to: ", instruction->name()));
}
if (sharding) {
instruction->set_sharding(
sharding->NormalizeTupleSharding(instruction->shape()));
}
if (parameter_replication) {
int leaf_count = ShapeUtil::GetLeafCount(instruction->shape());
const auto& replicated =
parameter_replication->replicated_at_leaf_buffers();
if (leaf_count != replicated.size()) {
return Error(lexer_.GetLoc(),
StrCat("parameter has ", leaf_count,
" leaf buffers, but parameter_replication has ",
replicated.size(), " elements."));
}
instruction->set_parameter_replicated_at_leaf_buffers(replicated);
}
if (predecessors) {
for (auto* pre : *predecessors) {
absl::Status status = pre->AddControlDependencyTo(instruction);
if (!status.ok()) {
return Error(name_loc, StrCat("error adding control dependency for: ",
name, " status: ", status.ToString()));
}
}
}
if (metadata) {
instruction->set_metadata(*metadata);
}
if (original_value) {
instruction->set_original_value(*original_value);
}
if (backend_config) {
instruction->set_raw_backend_config_string(std::move(*backend_config));
}
if (frontend_attributes) {
instruction->set_frontend_attributes(*frontend_attributes);
}
if (statistics_viz) {
instruction->set_statistics_viz(*statistics_viz);
}
return AddInstruction(name, instruction, name_loc);
}
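// Creates an instruction of the given opcode, parsing its operands and
// opcode-specific attributes. When the shape was omitted in the text, it is
// inferred via ShapeInference where possible.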
HloInstruction* HloParserImpl::CreateInstruction(
HloComputation::Builder* builder, absl::string_view name,
std::optional<Shape> shape, HloOpcode opcode,
std::optional<HloOpcode> async_wrapped_opcode,
absl::flat_hash_map<std::string, AttrConfig>& attrs, bool allow_attributes,
std::vector<HloInstruction*>* preset_operands) {
std::vector<HloInstruction*> operands;
if (preset_operands) {
operands = *preset_operands;
}
const auto maybe_infer_shape =
[&](absl::FunctionRef<absl::StatusOr<Shape>()> infer) {
if (shape.has_value()) {
return true;
}
auto inferred = infer();
if (!inferred.ok()) {
return TokenError(
StrFormat("failed to infer shape for opcode: %s, error: %s",
HloOpcodeString(opcode), inferred.status().message()));
}
shape = std::move(inferred).value();
return true;
};
switch (opcode) {
case HloOpcode::kParameter: {
int64_t parameter_number;
if (!ParseToken(TokKind::kLparen,
"expects '(' before parameter number") ||
!ParseInt64(¶meter_number)) {
return nullptr;
}
const LocTy loc = lexer_.GetLoc();
if (parameter_number < 0) {
Error(loc, "parameter number must be >= 0");
return nullptr;
}
if (!ParseToken(TokKind::kRparen, "expects ')' after parameter number") ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
std::string param_name(name);
auto result = builder->AddParameter(HloInstruction::CreateParameter(
parameter_number, *shape, param_name));
if (!result.ok()) {
Error(loc, result.status().message());
return nullptr;
}
return result.value();
}
case HloOpcode::kConstant: {
Literal literal;
if (!ParseToken(TokKind::kLparen,
"expects '(' before constant literal") ||
!ParseLiteral(&literal, *shape) ||
!ParseToken(TokKind::kRparen, "expects ')' after constant literal") ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateConstant(std::move(literal)));
}
case HloOpcode::kIota: {
optional<int64_t> iota_dimension;
attrs["iota_dimension"] = {true, AttrTy::kInt64,
&iota_dimension};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 0)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateIota(*shape, *iota_dimension));
}
case HloOpcode::kTopK: {
optional<int64_t> k;
attrs["k"] = {true, AttrTy::kInt64, &k};
optional<bool> largest;
attrs["largest"] = {false, AttrTy::kBool, &largest};
if ((!preset_operands && !ParseOperands(&operands, builder,
1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferTopKShape(operands[0]->shape(), *k);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateTopK(
*shape, operands[0], *k, (largest.has_value() ? *largest : true)));
}
case HloOpcode::kAbs:
case HloOpcode::kAllGatherDone:
case HloOpcode::kAllReduceDone:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kBitcast:
case HloOpcode::kCeil:
case HloOpcode::kClz:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kCopy:
case HloOpcode::kCopyDone:
case HloOpcode::kCos:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kImag:
case HloOpcode::kIsFinite:
case HloOpcode::kFloor:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kLogistic:
case HloOpcode::kNot:
case HloOpcode::kNegate:
case HloOpcode::kPopulationCount:
case HloOpcode::kReal:
case HloOpcode::kRsqrt:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kSqrt:
case HloOpcode::kCbrt:
case HloOpcode::kTan:
case HloOpcode::kTanh: {
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferUnaryOpShape(opcode, operands[0]);
})) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateUnary(*shape, opcode, operands[0]));
}
case HloOpcode::kAdd:
case HloOpcode::kDivide:
case HloOpcode::kMultiply:
case HloOpcode::kSubtract:
case HloOpcode::kAtan2:
case HloOpcode::kComplex:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kPower:
case HloOpcode::kRemainder:
case HloOpcode::kAnd:
case HloOpcode::kOr:
case HloOpcode::kXor:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kShiftRightLogical:
case HloOpcode::kStochasticConvert: {
if ((!preset_operands &&
!ParseOperands(&operands, builder, 2)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferBinaryOpShape(opcode, operands[0],
operands[1]);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateBinary(
*shape, opcode, operands[0], operands[1]));
}
case HloOpcode::kClamp:
case HloOpcode::kSelect: {
if ((!preset_operands &&
!ParseOperands(&operands, builder, 3)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferTernaryOpShape(
opcode, operands[0], operands[1], operands[2]);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateTernary(
*shape, opcode, operands[0], operands[1], operands[2]));
}
case HloOpcode::kConvert: {
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateConvert(*shape, operands[0]));
}
case HloOpcode::kBitcastConvert: {
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateBitcastConvert(*shape, operands[0]));
}
case HloOpcode::kAllGather:
case HloOpcode::kAllGatherStart: {
CollectiveDeviceList device_list;
optional<int64_t> channel_id;
optional<std::vector<int64_t>> dimensions;
optional<bool> constrain_layout;
optional<bool> use_global_device_ids;
attrs["replica_groups"] = {false,
AttrTy::kCollectiveDeviceList, &device_list};
attrs["channel_id"] = {false, AttrTy::kInt64, &channel_id};
attrs["dimensions"] = {true, AttrTy::kBracedInt64List,
&dimensions};
attrs["constrain_layout"] = {false, AttrTy::kBool,
&constrain_layout};
attrs["use_global_device_ids"] = {false, AttrTy::kBool,
&use_global_device_ids};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (opcode == HloOpcode::kAllGather) {
return builder->AddInstruction(HloInstruction::CreateAllGather(
*shape, operands, dimensions->at(0), device_list,
constrain_layout ? *constrain_layout : false, channel_id,
use_global_device_ids ? *use_global_device_ids : false));
}
return builder->AddInstruction(HloInstruction::CreateAllGatherStart(
*shape, operands, dimensions->at(0), device_list,
constrain_layout ? *constrain_layout : false, channel_id,
use_global_device_ids ? *use_global_device_ids : false));
}
case HloOpcode::kAllReduce:
case HloOpcode::kAllReduceStart:
case HloOpcode::kReduceScatter: {
CollectiveDeviceList device_list;
optional<HloComputation*> to_apply;
optional<int64_t> channel_id;
optional<bool> constrain_layout;
optional<bool> use_global_device_ids;
optional<std::vector<int64_t>> dimensions;
attrs["to_apply"] = {true, AttrTy::kHloComputation,
&to_apply};
attrs["replica_groups"] = {false,
AttrTy::kCollectiveDeviceList, &device_list};
attrs["channel_id"] = {false, AttrTy::kInt64, &channel_id};
attrs["constrain_layout"] = {false, AttrTy::kBool,
&constrain_layout};
attrs["use_global_device_ids"] = {false, AttrTy::kBool,
&use_global_device_ids};
if (opcode == HloOpcode::kReduceScatter) {
attrs["dimensions"] = {true, AttrTy::kBracedInt64List,
&dimensions};
}
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (opcode == HloOpcode::kAllReduce) {
return builder->AddInstruction(HloInstruction::CreateAllReduce(
*shape, operands, *to_apply, device_list,
constrain_layout ? *constrain_layout : false, channel_id,
use_global_device_ids ? *use_global_device_ids : false));
} else if (opcode == HloOpcode::kReduceScatter) {
return builder->AddInstruction(HloInstruction::CreateReduceScatter(
*shape, operands, *to_apply, device_list,
constrain_layout ? *constrain_layout : false, channel_id,
use_global_device_ids ? *use_global_device_ids : false,
dimensions->at(0)));
}
return builder->AddInstruction(HloInstruction::CreateAllReduceStart(
*shape, operands, *to_apply, device_list,
constrain_layout ? *constrain_layout : false, channel_id,
use_global_device_ids ? *use_global_device_ids : false));
}
case HloOpcode::kAllToAll: {
CollectiveDeviceList device_list;
attrs["replica_groups"] = {false,
AttrTy::kCollectiveDeviceList, &device_list};
optional<int64_t> channel_id;
attrs["channel_id"] = {false, AttrTy::kInt64, &channel_id};
optional<std::vector<int64_t>> dimensions;
attrs["dimensions"] = {false, AttrTy::kBracedInt64List,
&dimensions};
optional<bool> constrain_layout;
attrs["constrain_layout"] = {false, AttrTy::kBool,
&constrain_layout};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape) ||
(dimensions && dimensions->size() != 1)) {
return nullptr;
}
optional<int64_t> split_dimension;
if (dimensions) {
split_dimension = dimensions->at(0);
}
return builder->AddInstruction(HloInstruction::CreateAllToAll(
*shape, operands, device_list,
constrain_layout ? *constrain_layout : false, channel_id,
split_dimension));
}
case HloOpcode::kCollectiveBroadcast: {
CollectiveDeviceList device_list;
attrs["replica_groups"] = {true,
AttrTy::kCollectiveDeviceList, &device_list};
optional<int64_t> channel_id;
attrs["channel_id"] = {false, AttrTy::kInt64, &channel_id};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateCollectiveBroadcast(
*shape, operands, device_list, false, channel_id));
}
case HloOpcode::kCollectivePermute:
case HloOpcode::kCollectivePermuteStart: {
optional<std::vector<std::vector<int64_t>>> source_targets;
attrs["source_target_pairs"] = {
true, AttrTy::kBracedInt64ListList, &source_targets};
optional<int64_t> channel_id;
attrs["channel_id"] = {false, AttrTy::kInt64, &channel_id};
optional<std::vector<std::vector<int64_t>>> slice_sizes;
attrs["slice_sizes"] = {false, AttrTy::kBracedInt64ListList,
&slice_sizes};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
std::vector<std::pair<int64_t, int64_t>> pairs(source_targets->size());
for (int i = 0; i < pairs.size(); i++) {
if ((*source_targets)[i].size() != 2) {
TokenError("expects 'source_target_pairs=' to be a list of pairs");
return nullptr;
}
pairs[i].first = (*source_targets)[i][0];
pairs[i].second = (*source_targets)[i][1];
}
if (!slice_sizes.has_value()) {
if (operands.size() != 1) {
TokenError(
"CollectivePermute and CollectivePermuteStart must have exactly "
"one operand (input buffer) unless it performs dynamic-slice and "
"in-place update.");
return nullptr;
}
if (opcode == HloOpcode::kCollectivePermute) {
return builder->AddInstruction(
HloInstruction::CreateCollectivePermute(*shape, operands[0],
pairs, channel_id));
}
if (opcode == HloOpcode::kCollectivePermuteStart) {
return builder->AddInstruction(
HloInstruction::CreateCollectivePermuteStart(*shape, operands[0],
pairs, channel_id));
}
LOG(FATAL) << "Expect opcode to be CollectivePermute or "
"CollectivePermuteStart, but got "
<< opcode;
}
if (operands.size() != 4) {
TokenError(
"CollectivePermute and CollectivePermuteStart must "
"have exactly four operands for dynamic-slice and "
"in-place update.");
return nullptr;
}
if (opcode == HloOpcode::kCollectivePermute) {
return builder->AddInstruction(HloInstruction::CreateCollectivePermute(
*shape, operands[0], operands[1], operands[2], operands[3], pairs,
*slice_sizes, channel_id));
}
if (opcode == HloOpcode::kCollectivePermuteStart) {
return builder->AddInstruction(
HloInstruction::CreateCollectivePermuteStart(
*shape, operands[0], operands[1], operands[2], operands[3],
pairs, *slice_sizes, channel_id));
}
LOG(FATAL) << "Expect opcode to be CollectivePermute or "
"CollectivePermuteStart, but got "
<< opcode;
}
case HloOpcode::kAsyncStart:
case HloOpcode::kAsyncUpdate:
case HloOpcode::kAsyncDone: {
std::optional<HloComputation*> async_computation;
if (!preset_operands && !ParseOperands(&operands, builder)) {
return nullptr;
}
auto is_async_shape_correct = [](const Shape& shape) {
return shape.IsTuple() && shape.tuple_shapes_size() >= 2 &&
shape.tuple_shapes(0).IsTuple();
};
if (opcode == HloOpcode::kAsyncUpdate ||
opcode == HloOpcode::kAsyncDone) {
if (operands.size() != 1 ||
!is_async_shape_correct(operands[0]->shape())) {
TokenError(
"AsyncUpdate and AsyncDone expect a single operand in the form "
"of ((async-operands), async-outputs, state).");
return nullptr;
}
}
if (opcode == HloOpcode::kAsyncStart ||
opcode == HloOpcode::kAsyncUpdate) {
if (!is_async_shape_correct(*shape)) {
TokenError(
"AsyncStart and AsyncUpdate expect the op shape to be in the "
"form of "
"((async-operands), async-outputs, state).");
return nullptr;
}
}
if (opcode == HloOpcode::kAsyncUpdate ||
opcode == HloOpcode::kAsyncDone) {
if (operands.size() != 1 ||
!is_async_shape_correct(operands[0]->shape())) {
TokenError(
"AsyncUpdate and AsyncDone expect a single operand in the form "
"of ((async-operands), async-outputs, state).");
return nullptr;
}
if (!operands[0]->IsAsynchronous()) {
TokenError(
"AsyncUpdate and AsyncDone expect their operand to be the "
"previous async op.");
return nullptr;
}
}
optional<std::string> async_execution_thread;
attrs["async_execution_thread"] = {false, AttrTy::kString,
&async_execution_thread};
if (async_wrapped_opcode) {
if (opcode == HloOpcode::kAsyncStart) {
std::vector<HloInstruction*> async_wrapped_operands;
std::vector<Shape> async_wrapped_operand_shapes;
Shape async_wrapped_root_shape;
async_wrapped_operand_shapes.reserve(operands.size());
for (const HloInstruction* operand : operands) {
async_wrapped_operand_shapes.push_back(operand->shape());
}
async_wrapped_root_shape = shape->tuple_shapes(1);
HloComputation::Builder async_wrapped_builder("async_wrapped");
async_wrapped_operands.reserve(async_wrapped_operand_shapes.size());
for (int i = 0; i < async_wrapped_operand_shapes.size(); ++i) {
async_wrapped_operands.push_back(
async_wrapped_builder.AddInstruction(
HloInstruction::CreateParameter(
i, async_wrapped_operand_shapes.at(i), "async_param")));
}
HloInstruction* root =
CreateInstruction(&async_wrapped_builder, "async_op",
async_wrapped_root_shape, *async_wrapped_opcode,
std::nullopt, attrs,
allow_attributes, &async_wrapped_operands);
if (!root) {
return nullptr;
}
computations_.emplace_back(async_wrapped_builder.Build(root));
async_computation = computations_.back().get();
} else {
if (operands[0]->async_wrapped_opcode() != *async_wrapped_opcode) {
TokenError(
StrFormat("Expect async wrapped opcode to be %s, but got %s",
HloOpcodeString(operands[0]->async_wrapped_opcode()),
HloOpcodeString(*async_wrapped_opcode)));
return nullptr;
}
}
} else {
attrs["calls"] = {opcode == HloOpcode::kAsyncStart,
AttrTy::kHloComputation, &async_computation};
}
if (!(async_wrapped_opcode && opcode == HloOpcode::kAsyncStart)) {
if (!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
}
if (opcode == HloOpcode::kAsyncUpdate ||
opcode == HloOpcode::kAsyncDone) {
if (async_execution_thread &&
operands[0]->async_execution_thread() != *async_execution_thread) {
TokenError(StrFormat(
"Expect async_execution_thread to be %s, but got %s",
operands[0]->async_execution_thread(), *async_execution_thread));
return nullptr;
}
if (async_computation &&
operands[0]->async_wrapped_computation() != *async_computation) {
TokenError(
StrFormat("Expect async_wrapped_computation to be %s, but got %s",
operands[0]->async_wrapped_computation()->name(),
(*async_computation)->name()));
return nullptr;
}
}
if (opcode == HloOpcode::kAsyncStart &&
(*async_computation)->IsAsyncComputation()) {
TokenError(StrFormat(
"Computation %s is already referenced by another async op",
(*async_computation)->name()));
return nullptr;
}
if (opcode == HloOpcode::kAsyncStart) {
if (!async_execution_thread) {
async_execution_thread = HloInstruction::kMainExecutionThread;
}
return builder->AddInstruction(HloInstruction::CreateAsyncStart(
*shape, operands, *async_computation, *async_execution_thread));
}
if (opcode == HloOpcode::kAsyncUpdate) {
return builder->AddInstruction(
HloInstruction::CreateAsyncUpdate(*shape, operands[0]));
}
return builder->AddInstruction(
HloInstruction::CreateAsyncDone(*shape, operands[0]));
}
case HloOpcode::kCopyStart: {
optional<int> cross_program_prefetch_index = std::nullopt;
attrs["cross_program_prefetch_index"] = {
false, AttrTy::kInt32, &cross_program_prefetch_index};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateCopyStart(
*shape, operands[0], cross_program_prefetch_index));
}
case HloOpcode::kReplicaId: {
if ((!preset_operands &&
!ParseOperands(&operands, builder, 0)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (shape.has_value()) {
return builder->AddInstruction(HloInstruction::CreateReplicaId(*shape));
}
return builder->AddInstruction(HloInstruction::CreateReplicaId());
}
case HloOpcode::kPartitionId: {
if ((!preset_operands &&
!ParseOperands(&operands, builder, 0)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (shape.has_value()) {
return builder->AddInstruction(
HloInstruction::CreatePartitionId(*shape));
}
return builder->AddInstruction(HloInstruction::CreatePartitionId());
}
case HloOpcode::kDynamicReshape: {
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateDynamicReshape(
*shape, operands[0],
absl::Span<HloInstruction* const>(operands).subspan(1)));
}
case HloOpcode::kReshape: {
optional<int64_t> inferred_dimension;
attrs["inferred_dimension"] = {false, AttrTy::kInt64,
&inferred_dimension};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateReshape(
*shape, operands[0], inferred_dimension.value_or(-1)));
}
case HloOpcode::kAfterAll: {
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (operands.empty()) {
return builder->AddInstruction(HloInstruction::CreateToken());
}
return builder->AddInstruction(HloInstruction::CreateAfterAll(operands));
}
case HloOpcode::kAddDependency: {
if ((!preset_operands &&
!ParseOperands(&operands, builder, 2)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateAddDependency(operands[0], operands[1]));
}
case HloOpcode::kSort: {
optional<std::vector<int64_t>> dimensions;
attrs["dimensions"] = {true, AttrTy::kBracedInt64List,
&dimensions};
optional<bool> is_stable = false;
attrs["is_stable"] = {false, AttrTy::kBool, &is_stable};
optional<HloComputation*> to_apply;
attrs["to_apply"] = {true, AttrTy::kHloComputation,
&to_apply};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape) ||
dimensions->size() != 1) {
return nullptr;
}
if (!maybe_infer_shape([&] {
absl::InlinedVector<const Shape*, 2> arg_shapes;
arg_shapes.reserve(operands.size());
for (auto* operand : operands) {
arg_shapes.push_back(&operand->shape());
}
return ShapeInference::InferVariadicOpShape(opcode, arg_shapes);
})) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateSort(*shape, dimensions->at(0), operands,
to_apply.value(), is_stable.value()));
}
case HloOpcode::kTuple: {
if ((!preset_operands &&
!(shape.has_value()
? ParseOperands(&operands, builder, shape->tuple_shapes_size())
: ParseOperands(&operands, builder))) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
absl::InlinedVector<const Shape*, 2> arg_shapes;
arg_shapes.reserve(operands.size());
for (auto* operand : operands) {
arg_shapes.push_back(&operand->shape());
}
return ShapeInference::InferVariadicOpShape(opcode, arg_shapes);
})) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateVariadic(*shape, HloOpcode::kTuple, operands));
}
case HloOpcode::kWhile: {
optional<HloComputation*> condition;
optional<HloComputation*> body;
attrs["condition"] = {true, AttrTy::kHloComputation,
&condition};
attrs["body"] = {true, AttrTy::kHloComputation, &body};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferWhileShape(
condition.value()->ComputeProgramShape(),
body.value()->ComputeProgramShape(), operands[0]->shape());
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateWhile(
*shape, *condition, *body, operands[0]));
}
case HloOpcode::kRecv: {
optional<int64_t> channel_id;
optional<bool> is_host_transfer = false;
attrs["channel_id"] = {true, AttrTy::kInt64, &channel_id};
attrs["is_host_transfer"] = {false, AttrTy::kBool,
&is_host_transfer};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateRecv(
shape->tuple_shapes(0), operands[0], *channel_id, *is_host_transfer));
}
case HloOpcode::kRecvDone: {
optional<int64_t> channel_id;
optional<bool> is_host_transfer = false;
attrs["channel_id"] = {true, AttrTy::kInt64, &channel_id};
attrs["is_host_transfer"] = {false, AttrTy::kBool,
&is_host_transfer};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (dynamic_cast<const HloChannelInstruction*>(operands[0]) != nullptr) {
if (channel_id != operands[0]->channel_id()) {
return nullptr;
}
}
return builder->AddInstruction(HloInstruction::CreateRecvDone(
operands[0], channel_id.value(), *is_host_transfer));
}
case HloOpcode::kSend: {
optional<int64_t> channel_id;
optional<bool> is_host_transfer = false;
attrs["channel_id"] = {true, AttrTy::kInt64, &channel_id};
attrs["is_host_transfer"] = {false, AttrTy::kBool,
&is_host_transfer};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 2)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateSend(
operands[0], operands[1], *channel_id, *is_host_transfer));
}
case HloOpcode::kSendDone: {
optional<int64_t> channel_id;
optional<bool> is_host_transfer = false;
attrs["channel_id"] = {true, AttrTy::kInt64, &channel_id};
attrs["is_host_transfer"] = {false, AttrTy::kBool,
&is_host_transfer};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (dynamic_cast<const HloChannelInstruction*>(operands[0]) != nullptr) {
if (channel_id != operands[0]->channel_id()) {
return nullptr;
}
}
return builder->AddInstruction(HloInstruction::CreateSendDone(
operands[0], channel_id.value(), *is_host_transfer));
}
case HloOpcode::kGetTupleElement: {
optional<int64_t> index;
attrs["index"] = {true, AttrTy::kInt64, &index};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeUtil::GetTupleElementShape(operands[0]->shape(),
*index);
})) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateGetTupleElement(*shape, operands[0], *index));
}
case HloOpcode::kCall: {
optional<HloComputation*> to_apply;
optional<bool> is_composite = false;
attrs["to_apply"] = {true, AttrTy::kHloComputation,
&to_apply};
attrs["is_composite"] = {false, AttrTy::kBool,
&is_composite};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
absl::InlinedVector<const Shape*, 2> arg_shapes;
arg_shapes.reserve(operands.size());
for (auto* operand : operands) {
arg_shapes.push_back(&operand->shape());
}
return ShapeInference::InferCallShape(
arg_shapes, to_apply.value()->ComputeProgramShape());
})) {
return nullptr;
}
auto call_op = HloInstruction::CreateCall(*shape, operands, *to_apply);
call_op->set_is_composite(is_composite.value());
return builder->AddInstruction(std::move(call_op));
}
case HloOpcode::kReduceWindow: {
optional<HloComputation*> reduce_computation;
optional<Window> window;
attrs["window"] = {false, AttrTy::kWindow, &window};
attrs["to_apply"] = {true, AttrTy::kHloComputation,
&reduce_computation};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!window) {
window.emplace();
}
if (operands.size() % 2) {
TokenError(StrCat("expects an even number of operands, but has ",
operands.size(), " operands"));
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferReduceWindowShape(
operands[0]->shape(), operands[1]->shape(), *window,
reduce_computation.value()->ComputeProgramShape());
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateReduceWindow(
*shape,
absl::Span<HloInstruction* const>(operands).subspan(
0, operands.size() / 2),
absl::Span<HloInstruction* const>(operands).subspan(operands.size() /
2),
*window, *reduce_computation));
}
case HloOpcode::kConvolution: {
optional<Window> window;
optional<ConvolutionDimensionNumbers> dnums;
optional<int64_t> feature_group_count;
optional<int64_t> batch_group_count;
attrs["window"] = {false, AttrTy::kWindow, &window};
attrs["dim_labels"] = {true,
AttrTy::kConvolutionDimensionNumbers, &dnums};
attrs["feature_group_count"] = {false, AttrTy::kInt64,
&feature_group_count};
attrs["batch_group_count"] = {false, AttrTy::kInt64,
&batch_group_count};
optional<std::vector<PrecisionConfig::Precision>> operand_precision;
attrs["operand_precision"] = {false, AttrTy::kPrecisionList,
&operand_precision};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 2)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!window) {
window.emplace();
}
if (!feature_group_count) {
feature_group_count = 1;
}
if (!batch_group_count) {
batch_group_count = 1;
}
PrecisionConfig precision_config;
if (operand_precision) {
*precision_config.mutable_operand_precision() = {
operand_precision->begin(), operand_precision->end()};
} else {
precision_config.mutable_operand_precision()->Resize(
operands.size(), PrecisionConfig::DEFAULT);
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferConvolveShape(
operands[0]->shape(), operands[1]->shape(),
*feature_group_count, *batch_group_count, *window, *dnums,
std::nullopt);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateConvolve(
*shape, operands[0], operands[1],
feature_group_count.value(), batch_group_count.value(), *window,
*dnums, precision_config));
}
case HloOpcode::kFft: {
optional<FftType> fft_type;
optional<std::vector<int64_t>> fft_length;
attrs["fft_type"] = {true, AttrTy::kFftType, &fft_type};
attrs["fft_length"] = {true, AttrTy::kBracedInt64List,
&fft_length};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferFftShape(operands[0]->shape(),
*fft_type, *fft_length);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateFft(
*shape, operands[0], *fft_type, *fft_length));
}
case HloOpcode::kTriangularSolve: {
TriangularSolveOptions options;
if ((!preset_operands &&
!ParseOperands(&operands, builder, 2)) ||
(allow_attributes && !ParseAttributesAsProtoMessage(
attrs, &options))) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferTriangularSolveShape(
operands[0]->shape(), operands[1]->shape(), options);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateTriangularSolve(
*shape, operands[0], operands[1], options));
}
case HloOpcode::kCompare: {
optional<ComparisonDirection> direction;
optional<Comparison::Type> type;
attrs["direction"] = {true, AttrTy::kComparisonDirection,
&direction};
attrs["type"] = {false, AttrTy::kComparisonType, &type};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 2)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferBinaryOpShape(opcode, operands[0],
operands[1]);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateCompare(
*shape, operands[0], operands[1], *direction, type));
}
case HloOpcode::kCholesky: {
CholeskyOptions options;
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
(allow_attributes && !ParseAttributesAsProtoMessage(
attrs, &options))) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferCholeskyShape(operands[0]->shape());
})) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateCholesky(*shape, operands[0], options));
}
case HloOpcode::kBroadcast: {
if (!preset_operands &&
!ParseOperands(&operands, builder, 1)) {
return nullptr;
}
bool operand_is_scalar = ShapeUtil::IsScalar(operands[0]->shape());
optional<std::vector<int64_t>> broadcast_dimensions;
attrs["dimensions"] = {!operand_is_scalar,
AttrTy::kBracedInt64List, &broadcast_dimensions};
if (!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (operand_is_scalar && !broadcast_dimensions.has_value()) {
broadcast_dimensions.emplace();
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferBroadcastShape(operands[0]->shape(),
*broadcast_dimensions);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateBroadcast(
*shape, operands[0], *broadcast_dimensions));
}
case HloOpcode::kConcatenate: {
optional<std::vector<int64_t>> dimensions;
attrs["dimensions"] = {true, AttrTy::kBracedInt64List,
&dimensions};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape) ||
dimensions->size() != 1) {
return nullptr;
}
if (!maybe_infer_shape([&] {
absl::InlinedVector<const Shape*, 2> arg_shapes;
arg_shapes.reserve(operands.size());
for (auto* operand : operands) {
arg_shapes.push_back(&operand->shape());
}
return ShapeInference::InferConcatOpShape(arg_shapes,
dimensions->at(0));
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateConcatenate(
*shape, operands, dimensions->at(0)));
}
case HloOpcode::kMap: {
optional<HloComputation*> to_apply;
attrs["to_apply"] = {true, AttrTy::kHloComputation,
&to_apply};
optional<std::vector<int64_t>> dimensions;
attrs["dimensions"] = {false, AttrTy::kBracedInt64List,
&dimensions};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
absl::InlinedVector<const Shape*, 2> arg_shapes;
arg_shapes.reserve(operands.size());
for (auto* operand : operands) {
arg_shapes.push_back(&operand->shape());
}
return ShapeInference::InferMapShape(
arg_shapes, to_apply.value()->ComputeProgramShape(),
*dimensions);
})) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateMap(*shape, operands, *to_apply));
}
case HloOpcode::kReduce: {
optional<HloComputation*> reduce_computation;
attrs["to_apply"] = {true, AttrTy::kHloComputation,
&reduce_computation};
optional<std::vector<int64_t>> dimensions_to_reduce;
attrs["dimensions"] = {true, AttrTy::kBracedInt64List,
&dimensions_to_reduce};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (operands.size() % 2) {
TokenError(StrCat("expects an even number of operands, but has ",
operands.size(), " operands"));
return nullptr;
}
if (!maybe_infer_shape([&] {
absl::InlinedVector<const Shape*, 2> arg_shapes;
arg_shapes.reserve(operands.size());
for (auto* operand : operands) {
arg_shapes.push_back(&operand->shape());
}
return ShapeInference::InferReduceShape(
arg_shapes, *dimensions_to_reduce,
reduce_computation.value()->ComputeProgramShape());
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateReduce(
*shape,
absl::Span<HloInstruction* const>(operands).subspan(
0, operands.size() / 2),
absl::Span<HloInstruction* const>(operands).subspan(operands.size() /
2),
*dimensions_to_reduce, *reduce_computation));
}
case HloOpcode::kReverse: {
optional<std::vector<int64_t>> dimensions;
attrs["dimensions"] = {true, AttrTy::kBracedInt64List,
&dimensions};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferReverseShape(operands[0]->shape(),
*dimensions);
})) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateReverse(*shape, operands[0], *dimensions));
}
case HloOpcode::kSelectAndScatter: {
optional<HloComputation*> select;
attrs["select"] = {true, AttrTy::kHloComputation, &select};
optional<HloComputation*> scatter;
attrs["scatter"] = {true, AttrTy::kHloComputation, &scatter};
optional<Window> window;
attrs["window"] = {false, AttrTy::kWindow, &window};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 3)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!window) {
window.emplace();
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferSelectAndScatterShape(
operands[0]->shape(), select.value()->ComputeProgramShape(),
*window, operands[1]->shape(), operands[2]->shape(),
scatter.value()->ComputeProgramShape());
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateSelectAndScatter(
*shape, operands[0], *select, *window,
operands[1], operands[2], *scatter));
}
case HloOpcode::kSlice: {
optional<SliceRanges> slice_ranges;
attrs["slice"] = {true, AttrTy::kSliceRanges, &slice_ranges};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateSlice(
*shape, operands[0], slice_ranges->starts, slice_ranges->limits,
slice_ranges->strides));
}
case HloOpcode::kDynamicSlice: {
optional<std::vector<int64_t>> dynamic_slice_sizes;
attrs["dynamic_slice_sizes"] = {
true, AttrTy::kBracedInt64List, &dynamic_slice_sizes};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (operands.empty()) {
TokenError("Expected at least one operand.");
return nullptr;
}
if (!(operands.size() == 2 && operands[1]->shape().rank() == 1) &&
operands.size() != 1 + operands[0]->shape().rank()) {
TokenError("Wrong number of operands.");
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateDynamicSlice(
*shape, operands[0],
absl::MakeSpan(operands).subspan(1),
*dynamic_slice_sizes));
}
case HloOpcode::kDynamicUpdateSlice: {
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (operands.size() < 2) {
TokenError("Expected at least two operands.");
return nullptr;
}
if (!(operands.size() == 3 && operands[2]->shape().rank() == 1) &&
operands.size() != 2 + operands[0]->shape().rank()) {
TokenError("Wrong number of operands.");
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
*shape, operands[0], operands[1],
absl::MakeSpan(operands).subspan(2)));
}
case HloOpcode::kTranspose: {
optional<std::vector<int64_t>> dimensions;
attrs["dimensions"] = {true, AttrTy::kBracedInt64List,
&dimensions};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferTransposeShape(operands[0]->shape(),
*dimensions);
})) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateTranspose(*shape, operands[0], *dimensions));
}
case HloOpcode::kBatchNormTraining: {
optional<float> epsilon;
attrs["epsilon"] = {true, AttrTy::kFloat, &epsilon};
optional<int64_t> feature_index;
attrs["feature_index"] = {true, AttrTy::kInt64,
&feature_index};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 3)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferBatchNormTrainingShape(
operands[0]->shape(), operands[1]->shape(),
operands[2]->shape(), *feature_index);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateBatchNormTraining(
*shape, operands[0], operands[1],
operands[2], *epsilon, *feature_index));
}
case HloOpcode::kBatchNormInference: {
optional<float> epsilon;
attrs["epsilon"] = {true, AttrTy::kFloat, &epsilon};
optional<int64_t> feature_index;
attrs["feature_index"] = {true, AttrTy::kInt64,
&feature_index};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 5)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferBatchNormInferenceShape(
operands[0]->shape(), operands[1]->shape(),
operands[2]->shape(), operands[3]->shape(),
operands[4]->shape(), *feature_index);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateBatchNormInference(
*shape, operands[0], operands[1],
operands[2], operands[3],
operands[4], *epsilon, *feature_index));
}
case HloOpcode::kBatchNormGrad: {
optional<float> epsilon;
attrs["epsilon"] = {true, AttrTy::kFloat, &epsilon};
optional<int64_t> feature_index;
attrs["feature_index"] = {true, AttrTy::kInt64,
&feature_index};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 5)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferBatchNormGradShape(
operands[0]->shape(), operands[1]->shape(),
operands[2]->shape(), operands[3]->shape(),
operands[4]->shape(), *feature_index);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateBatchNormGrad(
*shape, operands[0], operands[1],
operands[2], operands[3],
operands[4], *epsilon, *feature_index));
}
case HloOpcode::kPad: {
optional<PaddingConfig> padding;
attrs["padding"] = {true, AttrTy::kPaddingConfig, &padding};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 2)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferPadShape(
operands[0]->shape(), operands[1]->shape(), *padding);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreatePad(
*shape, operands[0], operands[1], *padding));
}
case HloOpcode::kFusion: {
optional<HloComputation*> fusion_computation;
attrs["calls"] = {true, AttrTy::kHloComputation,
&fusion_computation};
optional<HloInstruction::FusionKind> fusion_kind;
attrs["kind"] = {true, AttrTy::kFusionKind, &fusion_kind};
optional<
std::vector<std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>>
output_to_operand_aliasing;
attrs["output_to_operand_aliasing"] = {false,
AttrTy::kInstructionAliasing,
&output_to_operand_aliasing};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
auto instr = builder->AddInstruction(HloInstruction::CreateFusion(
*shape, *fusion_kind, operands, *fusion_computation));
auto fusion_instr = Cast<HloFusionInstruction>(instr);
if (output_to_operand_aliasing.has_value()) {
fusion_instr->set_output_to_operand_aliasing(
std::move(*output_to_operand_aliasing));
}
return instr;
}
case HloOpcode::kInfeed: {
optional<std::string> config;
attrs["infeed_config"] = {false, AttrTy::kString, &config};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!shape->IsTuple() && !ShapeUtil::IsEmptyTuple(*shape)) {
TokenError("infeed must have a non-empty tuple shape");
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateInfeed(
ShapeUtil::GetTupleElementShape(*shape, 0), operands[0],
config ? *config : ""));
}
case HloOpcode::kOutfeed: {
optional<std::string> config;
optional<Shape> outfeed_shape;
attrs["outfeed_config"] = {false, AttrTy::kString, &config};
attrs["outfeed_shape"] = {false, AttrTy::kShape,
&outfeed_shape};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 2)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
HloInstruction* const outfeed_input = operands[0];
HloInstruction* const outfeed_token = operands[1];
const Shape shape =
outfeed_shape.has_value() ? *outfeed_shape : outfeed_input->shape();
return builder->AddInstruction(HloInstruction::CreateOutfeed(
shape, outfeed_input, outfeed_token, config ? *config : ""));
}
case HloOpcode::kRng: {
optional<RandomDistribution> distribution;
attrs["distribution"] = {true, AttrTy::kDistribution,
&distribution};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateRng(*shape, *distribution, operands));
}
case HloOpcode::kRngGetAndUpdateState: {
optional<int64_t> delta;
attrs["delta"] = {true, AttrTy::kInt64, &delta};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 0)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateRngGetAndUpdateState(*shape, *delta));
}
case HloOpcode::kRngBitGenerator: {
optional<RandomAlgorithm> algorithm;
attrs["algorithm"] = {true, AttrTy::kRandomAlgorithm,
&algorithm};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateRngBitGenerator(
*shape, operands[0], *algorithm));
}
case HloOpcode::kReducePrecision: {
optional<int64_t> exponent_bits;
optional<int64_t> mantissa_bits;
attrs["exponent_bits"] = {true, AttrTy::kInt64,
&exponent_bits};
attrs["mantissa_bits"] = {true, AttrTy::kInt64,
&mantissa_bits};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateReducePrecision(
*shape, operands[0], static_cast<int>(*exponent_bits),
static_cast<int>(*mantissa_bits)));
}
case HloOpcode::kConditional: {
optional<HloComputation*> true_computation;
optional<HloComputation*> false_computation;
optional<std::vector<HloComputation*>> branch_computations;
if (!preset_operands && !ParseOperands(&operands, builder)) {
return nullptr;
}
if (!ShapeUtil::IsScalar(operands[0]->shape())) {
TokenError("The first operand must be a scalar");
return nullptr;
}
const bool branch_index_is_bool =
operands[0]->shape().element_type() == PRED;
if (branch_index_is_bool) {
attrs["true_computation"] = {true, AttrTy::kHloComputation,
&true_computation};
attrs["false_computation"] = {
true, AttrTy::kHloComputation, &false_computation};
} else {
if (operands[0]->shape().element_type() != S32) {
TokenError("The first operand must be a scalar of PRED or S32");
return nullptr;
}
attrs["branch_computations"] = {true,
AttrTy::kBracedHloComputationList,
&branch_computations};
}
if (!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
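      // When the predicate is a PRED scalar, fold the true/false computations
      // into the generic branch_computations list.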
if (branch_index_is_bool) {
branch_computations.emplace({*true_computation, *false_computation});
}
if (branch_computations->empty() ||
operands.size() != branch_computations->size() + 1) {
return nullptr;
}
if (!maybe_infer_shape([&] {
absl::InlinedVector<ProgramShape, 2> branch_computation_shapes;
branch_computation_shapes.reserve(branch_computations->size());
for (auto* computation : *branch_computations) {
branch_computation_shapes.push_back(
computation->ComputeProgramShape());
}
absl::InlinedVector<Shape, 2> branch_operand_shapes;
branch_operand_shapes.reserve(operands.size() - 1);
for (int i = 1; i < operands.size(); ++i) {
branch_operand_shapes.push_back(operands[i]->shape());
}
return ShapeInference::InferConditionalShape(
operands[0]->shape(), branch_computation_shapes,
branch_operand_shapes);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateConditional(
*shape, operands[0],
absl::MakeSpan(*branch_computations),
absl::MakeSpan(operands).subspan(1)));
}
case HloOpcode::kCustomCall: {
optional<std::string> custom_call_target;
optional<Window> window;
optional<ConvolutionDimensionNumbers> dnums;
optional<int64_t> feature_group_count;
optional<int64_t> batch_group_count;
optional<std::vector<Shape>> operand_layout_constraints;
optional<bool> custom_call_has_side_effect;
optional<HloComputation*> to_apply;
optional<
std::vector<std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>>
output_to_operand_aliasing;
optional<PaddingType> padding_type;
optional<std::vector<HloComputation*>> called_computations;
optional<CustomCallSchedule> custom_call_schedule;
optional<CustomCallApiVersion> api_version;
attrs["custom_call_target"] = {true, AttrTy::kString,
&custom_call_target};
attrs["window"] = {false, AttrTy::kWindow, &window};
attrs["dim_labels"] = {false,
AttrTy::kConvolutionDimensionNumbers, &dnums};
attrs["feature_group_count"] = {false, AttrTy::kInt64,
&feature_group_count};
attrs["batch_group_count"] = {false, AttrTy::kInt64,
&batch_group_count};
attrs["operand_layout_constraints"] = {
false, AttrTy::kShapeList, &operand_layout_constraints};
attrs["custom_call_has_side_effect"] = {false, AttrTy::kBool,
&custom_call_has_side_effect};
attrs["to_apply"] = {false, AttrTy::kHloComputation,
&to_apply};
attrs["called_computations"] = {false,
AttrTy::kBracedHloComputationList,
&called_computations};
attrs["output_to_operand_aliasing"] = {false,
AttrTy::kInstructionAliasing,
&output_to_operand_aliasing};
attrs["padding_type"] = {false, AttrTy::kPaddingType,
&padding_type};
optional<Literal> literal;
attrs["literal"] = {false, AttrTy::kLiteral, &literal};
optional<std::vector<PrecisionConfig::Precision>> operand_precision;
attrs["operand_precision"] = {false, AttrTy::kPrecisionList,
&operand_precision};
HloInstruction* instruction;
if (called_computations.has_value() && to_apply.has_value()) {
TokenError(
"A single instruction can't have both to_apply and "
"calls field");
return nullptr;
}
attrs["schedule"] = {false, AttrTy::kCustomCallSchedule,
&custom_call_schedule};
attrs["api_version"] = {false, AttrTy::kCustomCallApiVersion,
&api_version};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (api_version.has_value() &&
*api_version == CustomCallApiVersion::API_VERSION_UNSPECIFIED) {
TokenError(StrCat("Invalid API version: ",
CustomCallApiVersion_Name(*api_version)));
return nullptr;
}
if (operand_layout_constraints.has_value()) {
if (!LayoutUtil::HasLayout(*shape)) {
TokenError("Layout must be set on layout-constrained custom call");
return nullptr;
}
if (operands.size() != operand_layout_constraints->size()) {
TokenError(StrCat("Expected ", operands.size(),
" operand layout constraints, ",
operand_layout_constraints->size(), " given"));
return nullptr;
}
for (int64_t i = 0; i < operands.size(); ++i) {
const Shape& operand_shape_with_layout =
(*operand_layout_constraints)[i];
if (!LayoutUtil::HasLayout(operand_shape_with_layout)) {
TokenError(StrCat(
"Operand layout constraint shape ",
ShapeUtil::HumanStringWithLayout(operand_shape_with_layout),
" for operand ", i, " does not have a layout"));
return nullptr;
}
if (!ShapeUtil::Compatible(operand_shape_with_layout,
operands[i]->shape())) {
TokenError(StrCat(
"Operand layout constraint shape ",
ShapeUtil::HumanStringWithLayout(operand_shape_with_layout),
" for operand ", i, " is not compatible with operand shape ",
ShapeUtil::HumanStringWithLayout(operands[i]->shape())));
return nullptr;
}
}
instruction = builder->AddInstruction(HloInstruction::CreateCustomCall(
*shape, operands, *custom_call_target, *operand_layout_constraints,
""));
} else {
if (to_apply.has_value()) {
instruction =
builder->AddInstruction(HloInstruction::CreateCustomCall(
*shape, operands, *to_apply, *custom_call_target, ""));
} else if (called_computations.has_value()) {
instruction =
builder->AddInstruction(HloInstruction::CreateCustomCall(
*shape, operands, *called_computations, *custom_call_target,
""));
} else {
instruction =
builder->AddInstruction(HloInstruction::CreateCustomCall(
*shape, operands, *custom_call_target, ""));
}
}
auto custom_call_instr = Cast<HloCustomCallInstruction>(instruction);
if (window.has_value()) {
custom_call_instr->set_window(*window);
}
if (dnums.has_value()) {
custom_call_instr->set_convolution_dimension_numbers(*dnums);
}
if (feature_group_count.has_value()) {
custom_call_instr->set_feature_group_count(*feature_group_count);
}
if (batch_group_count.has_value()) {
custom_call_instr->set_batch_group_count(*batch_group_count);
}
if (padding_type.has_value()) {
custom_call_instr->set_padding_type(*padding_type);
}
if (custom_call_has_side_effect.has_value()) {
custom_call_instr->set_custom_call_has_side_effect(
*custom_call_has_side_effect);
}
if (custom_call_schedule.has_value()) {
custom_call_instr->set_custom_call_schedule(*custom_call_schedule);
}
if (api_version.has_value()) {
custom_call_instr->set_api_version(*api_version);
}
if (output_to_operand_aliasing.has_value()) {
custom_call_instr->set_output_to_operand_aliasing(
std::move(*output_to_operand_aliasing));
}
if (literal.has_value()) {
custom_call_instr->set_literal(std::move(*literal));
}
PrecisionConfig precision_config;
if (operand_precision) {
*precision_config.mutable_operand_precision() = {
operand_precision->begin(), operand_precision->end()};
} else {
precision_config.mutable_operand_precision()->Resize(
operands.size(), PrecisionConfig::DEFAULT);
}
*custom_call_instr->mutable_precision_config() = precision_config;
return instruction;
}
case HloOpcode::kDot: {
optional<std::vector<int64_t>> lhs_contracting_dims;
attrs["lhs_contracting_dims"] = {
false, AttrTy::kBracedInt64List, &lhs_contracting_dims};
optional<std::vector<int64_t>> rhs_contracting_dims;
attrs["rhs_contracting_dims"] = {
false, AttrTy::kBracedInt64List, &rhs_contracting_dims};
optional<std::vector<int64_t>> lhs_batch_dims;
attrs["lhs_batch_dims"] = {false, AttrTy::kBracedInt64List,
&lhs_batch_dims};
optional<std::vector<int64_t>> rhs_batch_dims;
attrs["rhs_batch_dims"] = {false, AttrTy::kBracedInt64List,
&rhs_batch_dims};
optional<std::vector<PrecisionConfig::Precision>> operand_precision;
attrs["operand_precision"] = {false, AttrTy::kPrecisionList,
&operand_precision};
std::vector<SparsityDescriptor> sparsity;
attrs["sparsity"] = {false, AttrTy::kSparsityDescriptor,
&sparsity};
optional<PrecisionConfig::Algorithm> algorithm;
attrs["algorithm"] = {false, AttrTy::kPrecisionAlgorithm,
&algorithm};
LocTy loc = lexer_.GetLoc();
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
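      // A sparse dot carries one extra sparsity-metadata operand per
      // descriptor, in addition to the usual LHS and RHS operands.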
int expected_size = HloDotInstruction::kOperands + sparsity.size();
if (sparsity.size() > HloDotInstruction::kOperands) {
Error(loc,
StrCat("too many sparse dot descriptors: ", sparsity.size()));
return nullptr;
}
if (operands.size() != expected_size) {
Error(loc, StrCat("expects ", expected_size, " operands, but has ",
operands.size(), " operands"));
return nullptr;
}
DotDimensionNumbers dnum;
if (lhs_contracting_dims) {
*dnum.mutable_lhs_contracting_dimensions() = {
lhs_contracting_dims->begin(), lhs_contracting_dims->end()};
}
if (rhs_contracting_dims) {
*dnum.mutable_rhs_contracting_dimensions() = {
rhs_contracting_dims->begin(), rhs_contracting_dims->end()};
}
if (lhs_batch_dims) {
*dnum.mutable_lhs_batch_dimensions() = {lhs_batch_dims->begin(),
lhs_batch_dims->end()};
}
if (rhs_batch_dims) {
*dnum.mutable_rhs_batch_dimensions() = {rhs_batch_dims->begin(),
rhs_batch_dims->end()};
}
PrecisionConfig precision_config;
if (operand_precision) {
*precision_config.mutable_operand_precision() = {
operand_precision->begin(), operand_precision->end()};
} else {
precision_config.mutable_operand_precision()->Resize(
HloDotInstruction::kOperands, PrecisionConfig::DEFAULT);
}
if (algorithm) {
precision_config.set_algorithm(*algorithm);
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferDotOpShape(
operands[0]->shape(), operands[1]->shape(), dnum,
std::nullopt, sparsity);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateDot(
*shape, operands[0], operands[1], dnum, precision_config, sparsity,
absl::MakeSpan(operands).subspan(HloDotInstruction::kOperands)));
}
case HloOpcode::kGather: {
optional<std::vector<int64_t>> offset_dims;
attrs["offset_dims"] = {true, AttrTy::kBracedInt64List,
&offset_dims};
optional<std::vector<int64_t>> collapsed_slice_dims;
attrs["collapsed_slice_dims"] = {
true, AttrTy::kBracedInt64List, &collapsed_slice_dims};
optional<std::vector<int64_t>> start_index_map;
attrs["start_index_map"] = {true, AttrTy::kBracedInt64List,
&start_index_map};
optional<int64_t> index_vector_dim;
attrs["index_vector_dim"] = {true, AttrTy::kInt64,
&index_vector_dim};
optional<std::vector<int64_t>> slice_sizes;
attrs["slice_sizes"] = {true, AttrTy::kBracedInt64List,
&slice_sizes};
optional<bool> indices_are_sorted = false;
attrs["indices_are_sorted"] = {false, AttrTy::kBool,
&indices_are_sorted};
optional<std::vector<int64_t>> operand_batching_dims;
attrs["operand_batching_dims"] = {
false, AttrTy::kBracedInt64List, &operand_batching_dims};
optional<std::vector<int64_t>> start_indices_batching_dims;
attrs["start_indices_batching_dims"] = {false,
AttrTy::kBracedInt64List,
&start_indices_batching_dims};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 2)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
GatherDimensionNumbers dim_numbers =
HloGatherInstruction::MakeGatherDimNumbers(
*offset_dims,
*collapsed_slice_dims,
*start_index_map,
*index_vector_dim,
operand_batching_dims ? *operand_batching_dims
: std::vector<int64_t>(),
start_indices_batching_dims ? *start_indices_batching_dims
: std::vector<int64_t>());
if (!maybe_infer_shape([&] {
return ShapeInference::InferGatherShape(operands[0]->shape(),
operands[1]->shape(),
dim_numbers, *slice_sizes);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateGather(
*shape, operands[0], operands[1],
dim_numbers, *slice_sizes, indices_are_sorted.value()));
}
case HloOpcode::kScatter: {
optional<std::vector<int64_t>> update_window_dims;
attrs["update_window_dims"] = {
true, AttrTy::kBracedInt64List, &update_window_dims};
optional<std::vector<int64_t>> inserted_window_dims;
attrs["inserted_window_dims"] = {
true, AttrTy::kBracedInt64List, &inserted_window_dims};
optional<std::vector<int64_t>> scatter_dims_to_operand_dims;
attrs["scatter_dims_to_operand_dims"] = {true,
AttrTy::kBracedInt64List,
&scatter_dims_to_operand_dims};
optional<int64_t> index_vector_dim;
attrs["index_vector_dim"] = {true, AttrTy::kInt64,
&index_vector_dim};
optional<HloComputation*> update_computation;
attrs["to_apply"] = {true, AttrTy::kHloComputation,
&update_computation};
optional<bool> indices_are_sorted = false;
attrs["indices_are_sorted"] = {false, AttrTy::kBool,
&indices_are_sorted};
optional<bool> unique_indices = false;
attrs["unique_indices"] = {false, AttrTy::kBool,
&unique_indices};
optional<std::vector<int64_t>> input_batching_dims;
attrs["input_batching_dims"] = {
false, AttrTy::kBracedInt64List, &input_batching_dims};
optional<std::vector<int64_t>> scatter_indices_batching_dims;
attrs["scatter_indices_batching_dims"] = {false,
AttrTy::kBracedInt64List,
&scatter_indices_batching_dims};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (operands.size() % 2 == 0) {
TokenError(StrCat("expects an odd number of operands, but has ",
operands.size(), " operands"));
return nullptr;
}
ScatterDimensionNumbers dim_numbers =
HloScatterInstruction::MakeScatterDimNumbers(
*update_window_dims,
*inserted_window_dims,
*scatter_dims_to_operand_dims,
*index_vector_dim,
input_batching_dims ? *input_batching_dims
: std::vector<int64_t>(),
scatter_indices_batching_dims ? *scatter_indices_batching_dims
: std::vector<int64_t>());
if (!maybe_infer_shape([&] {
absl::InlinedVector<const Shape*, 3> arg_shapes;
arg_shapes.reserve(operands.size());
for (auto* operand : operands) {
arg_shapes.push_back(&operand->shape());
}
return ShapeInference::InferScatterShape(
arg_shapes, update_computation.value()->ComputeProgramShape(),
dim_numbers);
})) {
return nullptr;
}
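      // Operands are laid out as N inputs, followed by the scatter indices,
      // followed by N updates.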
auto input_count = operands.size() / 2;
auto operand_span = absl::MakeConstSpan(operands);
return builder->AddInstruction(HloInstruction::CreateScatter(
*shape, operand_span.first(input_count), operands[input_count],
operand_span.last(input_count), *update_computation, dim_numbers,
indices_are_sorted.value(), unique_indices.value()));
}
case HloOpcode::kDomain: {
DomainData domain;
attrs["domain"] = {true, AttrTy::kDomain, &domain};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferUnaryOpShape(opcode, operands[0]);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateDomain(
*shape, operands[0], std::move(domain.exit_metadata),
std::move(domain.entry_metadata)));
}
case HloOpcode::kGetDimensionSize: {
optional<std::vector<int64_t>> dimensions;
attrs["dimensions"] = {true, AttrTy::kBracedInt64List,
&dimensions};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferGetDimensionSizeShape(
operands[0]->shape(), dimensions->at(0));
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateGetDimensionSize(
*shape, operands[0], (*dimensions)[0]));
}
case HloOpcode::kSetDimensionSize: {
optional<std::vector<int64_t>> dimensions;
attrs["dimensions"] = {true, AttrTy::kBracedInt64List,
&dimensions};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 2)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferSetDimensionSizeShape(
operands[0]->shape(), operands[1]->shape(), dimensions->at(0));
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateSetDimensionSize(
*shape, operands[0], operands[1], (*dimensions)[0]));
}
default:
return nullptr;
}
}
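// Parses a collective device list: either explicit replica groups such as
// {{0,1},{2,3}}, or a two-dimensional iota tile assignment, e.g. [2,2]<=[4].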
bool HloParserImpl::ParseCollectiveDeviceList(
CollectiveDeviceList* device_list) {
if (lexer_.GetKind() == TokKind::kLbrace) {
std::vector<ReplicaGroup> replica_groups;
if (!ParseReplicaGroupsOnly(&replica_groups)) {
return false;
}
*device_list = CollectiveDeviceList(replica_groups);
return true;
}
std::vector<int64_t> tile_assignment_dimensions;
std::vector<int64_t> iota_reshape_dims;
std::vector<int> iota_transpose_perm;
if (!ParseTileAssignment(tile_assignment_dimensions, iota_reshape_dims,
iota_transpose_perm, nullptr)) {
return false;
}
if (tile_assignment_dimensions.size() != 2) {
VLOG(kErrorLevel)
<< "Expected tile assignment to have 2 dimensions for collective "
"device list but got "
<< tile_assignment_dimensions.size();
return false;
}
*device_list = CollectiveDeviceList(IotaReplicaGroupList(
tile_assignment_dimensions[0], tile_assignment_dimensions[1],
iota_reshape_dims, iota_transpose_perm));
return true;
}
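// Parses a sharding attribute: '{' followed by either a single sharding or a
// comma-separated list of shardings (a flat tuple sharding), then '}'.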
bool HloParserImpl::ParseSharding(std::optional<HloSharding>& sharding) {
if (!ParseToken(TokKind::kLbrace,
"expected '{' to start sharding attribute")) {
return false;
}
if (lexer_.GetKind() != TokKind::kLbrace &&
lexer_.GetKind() != TokKind::kRbrace) {
return ParseSingleSharding(sharding, true);
}
std::vector<HloSharding> tuple_shardings;
if (lexer_.GetKind() != TokKind::kRbrace) {
do {
std::optional<HloSharding> tuple_sharding;
if (!ParseSingleSharding(tuple_sharding,
false)) {
return false;
}
tuple_shardings.push_back(std::move(*tuple_sharding));
} while (EatIfPresent(TokKind::kComma));
}
sharding = HloSharding::FlatTuple(std::move(tuple_shardings));
return ParseToken(TokKind::kRbrace, "expected '}' to end sharding attribute");
}
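// Parses frontend attributes of the form '{attr_a="value",attr_b={...}}';
// each value is either a quoted string or a JSON dictionary.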
bool HloParserImpl::ParseFrontendAttributes(
FrontendAttributes* frontend_attributes) {
CHECK(frontend_attributes != nullptr);
if (!ParseToken(TokKind::kLbrace,
"expected '{' to start frontend attributes")) {
return false;
}
if (lexer_.GetKind() == TokKind::kRbrace) {
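    // Empty attribute list; nothing to parse before the closing brace.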
} else {
do {
std::string attribute;
if (!ParseAttributeName(&attribute)) {
return false;
}
std::string result;
if (lexer_.GetKind() == TokKind::kString) {
if (!ParseString(&result)) {
return false;
}
} else if (lexer_.GetKind() == TokKind::kLbrace) {
if (!ParseJsonDict(&result)) {
return false;
}
} else {
return false;
}
(*frontend_attributes->mutable_map())[attribute] = result;
} while (EatIfPresent(TokKind::kComma));
}
return ParseToken(TokKind::kRbrace,
"expects '}' at the end of frontend attributes");
}
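// Parses a statistics-viz block such as '{visualizing_index=0,stat-1=3.3}':
// the first entry selects the statistic to visualize, and the remaining
// comma-separated entries are named statistic values.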
bool HloParserImpl::ParseStatisticsViz(StatisticsViz* statistics_viz) {
CHECK(statistics_viz != nullptr);
if (!ParseToken(TokKind::kLbrace, "expected '{' to start statistics")) {
return false;
}
if (lexer_.GetKind() == TokKind::kRbrace) {
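    // Empty statistics block; nothing to parse before the closing brace.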
} else {
std::string visualizing_index_attr_name;
if (!ParseAttributeName(&visualizing_index_attr_name)) {
return false;
}
if (lexer_.GetKind() != TokKind::kInt) {
return false;
}
statistics_viz->set_stat_index_to_visualize(lexer_.GetInt64Val());
lexer_.Lex();
while (EatIfPresent(TokKind::kComma)) {
std::string stat_name;
if (!ParseAttributeName(&stat_name)) {
return false;
}
if (lexer_.GetKind() != TokKind::kDecimal &&
lexer_.GetKind() != TokKind::kInt) {
return false;
}
Statistic statistic;
statistic.set_stat_name(stat_name);
statistic.set_stat_val(lexer_.GetKind() == TokKind::kDecimal
? lexer_.GetDecimalVal()
: lexer_.GetInt64Val());
lexer_.Lex();
*statistics_viz->add_statistics() = std::move(statistic);
}
}
return ParseToken(TokKind::kRbrace, "expects '}' at the end of statistics");
}
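// Parses the devices portion of a sharding: a bracketed dimension list
// '[d0,d1,...]', followed either by '<=[reshape_dims]' (and 'T(perm)' when
// there is more than one reshape dim) for an iota tile assignment, or by an
// explicit comma-separated list of device ids.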
bool HloParserImpl::ParseTileAssignment(
std::vector<int64_t>& tile_assignment_dimensions,
std::vector<int64_t>& iota_reshape_dims,
std::vector<int>& iota_transpose_perm, std::vector<int64_t>* devices) {
if (!ParseToken(TokKind::kLsquare,
"expected '[' to start sharding devices shape")) {
return false;
}
do {
int64_t dim;
if (!ParseInt64(&dim)) {
return false;
}
tile_assignment_dimensions.push_back(dim);
} while (EatIfPresent(TokKind::kComma));
if (!ParseToken(TokKind::kRsquare,
"expected ']' to end sharding devices shape")) {
return false;
}
if (lexer_.GetKind() == TokKind::kLeq) {
lexer_.Lex();
if (!ParseToken(TokKind::kLsquare,
"expected '[' to start sharding iota_reshape_dims")) {
return false;
}
do {
int64_t dim;
if (!ParseInt64(&dim)) {
return false;
}
iota_reshape_dims.push_back(dim);
} while (EatIfPresent(TokKind::kComma));
if (iota_reshape_dims.empty()) {
return TokenError("expected non-empty iota_reshape_dims");
}
if (!ParseToken(TokKind::kRsquare,
"expected ']' to end sharding iota_reshape_dims")) {
return false;
}
if (iota_reshape_dims.size() == 1) {
iota_transpose_perm.push_back(0);
} else {
if (lexer_.GetKind() != TokKind::kIdent || lexer_.GetStrVal() != "T") {
return TokenError(
"expected 'T(' to start sharding devices "
"iota_transpose_perm");
}
lexer_.Lex();
if (!ParseToken(TokKind::kLparen,
"expected 'T(' to start sharding devices "
"iota_transpose_perm")) {
return false;
}
do {
int64_t dim;
if (!ParseInt64(&dim)) {
return false;
}
if (dim >= iota_reshape_dims.size()) {
return TokenError(absl::StrFormat(
"Out of range iota minor_to_major value %lld.", dim));
}
iota_transpose_perm.push_back(dim);
} while (EatIfPresent(TokKind::kComma));
if (!ParseToken(TokKind::kRparen,
"expected ')' to end sharding devices "
"iota_transpose_perm")) {
return false;
}
}
} else {
if (!devices) {
return TokenError(
"Caller expected iota tile assignment when parsing, which should not "
"have any manual device entries.");
}
do {
int64_t device;
if (!ParseInt64(&device)) {
return false;
}
devices->push_back(device);
} while (EatIfPresent(TokKind::kComma));
}
return true;
}
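// Parses a single (non-tuple) sharding: one of 'replicated', 'maximal',
// 'manual', 'unknown', or a tiled sharding with devices=, plus optional
// last_tile_dims=/last_tile_dim_replicate, metadata=, and shard_as/shard_like
// group annotations.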
bool HloParserImpl::ParseSingleSharding(std::optional<HloSharding>& sharding,
bool lbrace_pre_lexed) {
if (!lbrace_pre_lexed &&
!ParseToken(TokKind::kLbrace,
"expected '{' to start sharding attribute")) {
return false;
}
LocTy loc = lexer_.GetLoc();
bool maximal = false;
bool replicated = false;
bool manual = false;
bool unknown = false;
bool last_tile_dim_replicate = false;
bool last_tile_dims = false;
bool shard_like = false;
bool shard_as = false;
int64_t shard_group_id;
std::vector<int64_t> devices;
std::vector<int64_t> tile_assignment_dimensions;
std::vector<int64_t> iota_reshape_dims;
std::vector<int> iota_transpose_perm;
std::vector<OpSharding::Type> subgroup_types;
std::vector<OpMetadata> metadata;
while (lexer_.GetKind() != TokKind::kRbrace) {
switch (lexer_.GetKind()) {
case TokKind::kw_maximal:
maximal = true;
lexer_.Lex();
break;
case TokKind::kw_replicated:
replicated = true;
lexer_.Lex();
break;
case TokKind::kw_manual:
manual = true;
lexer_.Lex();
break;
case TokKind::kw_unknown:
unknown = true;
lexer_.Lex();
break;
case TokKind::kAttributeName: {
if (lexer_.GetStrVal() == "device") {
if (lexer_.Lex() != TokKind::kInt) {
return TokenError("device= attribute must be an integer");
}
devices = {lexer_.GetInt64Val()};
lexer_.Lex();
} else if (lexer_.GetStrVal() == "devices") {
lexer_.Lex();
if (!ParseTileAssignment(tile_assignment_dimensions,
iota_reshape_dims, iota_transpose_perm,
&devices)) {
return false;
}
} else if (lexer_.GetStrVal() == "metadata") {
lexer_.Lex();
if (!ParseSingleOrListMetadata(metadata)) {
return false;
}
} else if (lexer_.GetStrVal() == "last_tile_dims") {
last_tile_dims = true;
lexer_.Lex();
if (!ParseListShardingType(&subgroup_types)) {
return false;
}
} else {
return TokenError(
"unknown attribute in sharding: expected device=, devices= "
"metadata= or last_tile_dims= ");
}
break;
}
case TokKind::kw_last_tile_dim_replicate:
last_tile_dim_replicate = true;
lexer_.Lex();
break;
case TokKind::kw_shard_as: {
shard_as = true;
lexer_.Lex();
if (!ParseInt64(&shard_group_id)) {
return false;
}
break;
}
case TokKind::kw_shard_like: {
shard_like = true;
lexer_.Lex();
if (!ParseInt64(&shard_group_id)) {
return false;
}
break;
}
case TokKind::kRbrace:
break;
default:
return TokenError("unexpected token");
}
}
if (replicated) {
if (!devices.empty()) {
return Error(loc,
"replicated shardings should not have any devices assigned");
}
sharding = HloSharding::Replicate(metadata);
} else if (maximal) {
if (devices.size() != 1) {
return Error(loc,
"maximal shardings should have exactly one device assigned");
}
sharding = HloSharding::AssignDevice(devices[0], metadata);
} else if (manual) {
if (!devices.empty()) {
return Error(loc,
"manual shardings should not have any devices assigned");
}
sharding = HloSharding::Manual(metadata);
} else if (unknown) {
if (!devices.empty()) {
return Error(loc,
"unknown shardings should not have any devices assigned");
}
sharding = HloSharding::Unknown(metadata);
} else {
if (tile_assignment_dimensions.empty()) {
return Error(
loc,
"non-maximal shardings must have a tile assignment list including "
"dimensions");
}
if (iota_transpose_perm.size() != iota_reshape_dims.size()) {
return Error(loc,
absl::StrFormat(
"iota_transpose_perm should have the same rank as "
"iota_reshape_dims : expected %lld, saw %lld.",
iota_reshape_dims.size(), iota_transpose_perm.size()));
}
if (last_tile_dim_replicate) {
CHECK(subgroup_types.empty());
subgroup_types.push_back(OpSharding::REPLICATED);
}
if (!iota_reshape_dims.empty()) {
CHECK(devices.empty());
sharding =
subgroup_types.empty()
? HloSharding::IotaTile(tile_assignment_dimensions,
iota_reshape_dims, iota_transpose_perm,
metadata)
: HloSharding::Subgroup(
TileAssignment(tile_assignment_dimensions,
iota_reshape_dims, iota_transpose_perm),
subgroup_types, metadata);
} else {
if (devices.size() <= 1) {
return Error(
loc,
"non-maximal shardings must have more than one device assigned");
}
auto tiles = std::make_shared<Array<int64_t>>(tile_assignment_dimensions);
absl::c_copy(devices, tiles->begin());
sharding =
subgroup_types.empty()
? HloSharding::Tile(TileAssignment(std::move(tiles)), metadata)
: HloSharding::Subgroup(TileAssignment(std::move(tiles)),
subgroup_types, metadata);
}
}
if (shard_as || shard_like) {
sharding = sharding->SetShardGroup(
shard_as ? HloSharding::ShardAs(shard_group_id)
: HloSharding::ShardLike(shard_group_id));
}
lexer_.Lex();
return true;
}
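// Parses a parameter_replication attribute: a braced, comma-separated list of
// 'true'/'false' values, one per replicated leaf buffer.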
bool HloParserImpl::ParseParameterReplication(
ParameterReplication* parameter_replication) {
if (!ParseToken(TokKind::kLbrace,
"expected '{' to start parameter_replication attribute")) {
return false;
}
if (lexer_.GetKind() != TokKind::kRbrace) {
do {
if (lexer_.GetKind() == TokKind::kw_true) {
parameter_replication->add_replicated_at_leaf_buffers(true);
} else if (lexer_.GetKind() == TokKind::kw_false) {
parameter_replication->add_replicated_at_leaf_buffers(false);
} else {
return false;
}
lexer_.Lex();
} while (EatIfPresent(TokKind::kComma));
}
return ParseToken(TokKind::kRbrace,
"expected '}' to end parameter_replication attribute");
}
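// Parses either a single 'true'/'false' token or a braced, comma-separated
// list of booleans.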
bool HloParserImpl::ParseBooleanListOrSingleBoolean(BoolList* boolean_list) {
if (lexer_.GetKind() != TokKind::kLbrace &&
lexer_.GetKind() != TokKind::kw_true &&
lexer_.GetKind() != TokKind::kw_false) {
TokenError("Expected list of booleans or true/false value");
return false;
}
auto parse_boolean = [this, boolean_list]() {
if (lexer_.GetKind() == TokKind::kw_true) {
boolean_list->push_back(true);
lexer_.Lex();
return true;
} else if (lexer_.GetKind() == TokKind::kw_false) {
boolean_list->push_back(false);
lexer_.Lex();
return true;
}
return false;
};
if (parse_boolean()) {
return true;
}
if (!ParseToken(TokKind::kLbrace,
"expected '{' to start boolean list attribute")) {
return false;
}
if (lexer_.GetKind() != TokKind::kRbrace) {
do {
if (!parse_boolean()) {
return false;
}
} while (EatIfPresent(TokKind::kComma));
}
return ParseToken(TokKind::kRbrace,
"expected '}' to end boolean list attribute");
}
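// Parses replica groups written as a list of integer lists, e.g. {{0,1},{2,3}}.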
bool HloParserImpl::ParseReplicaGroupsOnly(
std::vector<ReplicaGroup>* replica_groups) {
std::vector<std::vector<int64_t>> result;
if (!ParseInt64ListList(TokKind::kLbrace, TokKind::kRbrace, TokKind::kComma,
&result)) {
return false;
}
*replica_groups = CreateReplicaGroups(result);
return true;
}
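// Parses a domain attribute with 'kind=', 'entry=' and 'exit=' sub-attributes;
// only the sharding domain kind is supported.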
bool HloParserImpl::ParseDomain(DomainData* domain) {
absl::flat_hash_map<std::string, AttrConfig> attrs;
optional<std::string> kind;
optional<HloSharding> entry_sharding;
optional<HloSharding> exit_sharding;
attrs["kind"] = {true, AttrTy::kString, &kind};
attrs["entry"] = {true, AttrTy::kSharding, &entry_sharding};
attrs["exit"] = {true, AttrTy::kSharding, &exit_sharding};
if (!ParseSubAttributes(attrs)) {
return false;
}
if (*kind == ShardingMetadata::KindName()) {
auto entry_sharding_ptr =
std::make_unique<HloSharding>(std::move(*entry_sharding));
auto exit_sharding_ptr =
std::make_unique<HloSharding>(std::move(*exit_sharding));
domain->entry_metadata =
std::make_unique<ShardingMetadata>(std::move(entry_sharding_ptr));
domain->exit_metadata =
std::make_unique<ShardingMetadata>(std::move(exit_sharding_ptr));
} else {
return TokenError(StrCat("unsupported domain kind: ", *kind));
}
return true;
}
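// Parses a braced, comma-separated list of previously defined instruction
// names and resolves them to HloInstruction pointers.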
bool HloParserImpl::ParseInstructionNames(
std::vector<HloInstruction*>* instructions) {
if (!ParseToken(TokKind::kLbrace,
"expects '{' at the beginning of instruction name list")) {
return false;
}
LocTy loc = lexer_.GetLoc();
do {
std::string name;
if (!ParseName(&name)) {
return Error(loc, "expects a instruction name");
}
std::pair<HloInstruction*, LocTy>* instr = FindInstruction(name);
if (!instr) {
return TokenError(StrFormat("instruction '%s' is not defined", name));
}
instructions->push_back(instr->first);
} while (EatIfPresent(TokKind::kComma));
return ParseToken(TokKind::kRbrace,
"expects '}' at the end of instruction name list");
}
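// Returns a human-readable string for a literal element; complex values are
// printed as '(real, imag)'.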
template <typename T>
std::string StringifyValue(T val) {
if constexpr (is_complex_v<T>) {
return StrFormat("(%f, %f)", val.real(), val.imag());
} else {
return StrCat(val);
}
}
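// Extracts the NaN payload bits of a double value (returning the canonical
// quiet-NaN payload when none is set); types without NaNs yield 0.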
template <class T>
uint64_t GetNanPayload(T val) {
if constexpr (std::is_same_v<T, double>) {
auto rep = absl::bit_cast<uint64_t>(val);
if (auto payload = rep & NanPayloadBitMask<double>()) {
return payload;
}
return QuietNanWithoutPayload<double>();
} else {
static_assert(!std::numeric_limits<T>::has_quiet_NaN);
static_assert(!std::numeric_limits<T>::has_signaling_NaN);
return 0;
}
}
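// Builds a literal element from real/imag components; for non-complex target
// types the imaginary part is ignored.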
template <typename LiteralNativeT, typename LiteralComponentT>
LiteralNativeT LiteralNativeFromRealImag(LiteralComponentT real,
LiteralComponentT imag) {
if constexpr (std::is_same_v<LiteralNativeT,
std::complex<LiteralComponentT>>) {
return LiteralNativeT(real, imag);
} else {
return real;
}
}
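// ComponentType<T>::Type is T itself, or the underlying component type when T
// is std::complex<U>.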
template <typename T>
struct ComponentType {
using Type = T;
};
template <typename T>
struct ComponentType<std::complex<T>> {
using Type = T;
};
template <typename T>
T GetReal(T value) {
return value;
}
template <typename T>
T GetReal(std::complex<T> value) {
return value.real();
}
template <typename T>
T GetImag(T value) {
return 0;
}
template <typename T>
T GetImag(std::complex<T> value) {
return value.imag();
}
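// Largest and smallest (most negative) finite values representable by T.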
template <typename T>
struct MinMaxFiniteValue {
static constexpr T max() { return std::numeric_limits<T>::max(); }
static constexpr T min() { return std::numeric_limits<T>::lowest(); }
};
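// Returns true if `val` is finite; types without infinities or NaNs are
// always considered finite.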
template <typename T>
bool IsFinite(T val) {
if constexpr (std::numeric_limits<T>::has_infinity ||
std::numeric_limits<T>::has_quiet_NaN ||
std::numeric_limits<T>::has_signaling_NaN) {
return Eigen::numext::isfinite(val);
} else {
return true;
}
}
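// Verifies that a parsed scalar fits within the finite range of the literal's
// native type, reporting an error at `loc` on overflow; non-finite values are
// allowed through.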
template <typename LiteralNativeT, typename ParsedElemT>
bool HloParserImpl::CheckParsedValueIsInRange(LocTy loc, ParsedElemT value) {
if constexpr (std::is_floating_point_v<ParsedElemT>) {
auto value_as_native_t = static_cast<LiteralNativeT>(value);
auto value_double_converted = static_cast<ParsedElemT>(value_as_native_t);
if (!IsFinite(value) || IsFinite(value_double_converted)) {
value = value_double_converted;
}
}
PrimitiveType literal_ty =
primitive_util::NativeToPrimitiveType<LiteralNativeT>();
if (!IsFinite(value)) {
} else if constexpr (std::is_unsigned<LiteralNativeT>::value) {
static_assert(std::is_same_v<ParsedElemT, int64_t> ||
std::is_same_v<ParsedElemT, bool>,
"Unimplemented checking for ParsedElemT");
const uint64_t unsigned_value = value;
const uint64_t upper_bound =
static_cast<uint64_t>(std::numeric_limits<LiteralNativeT>::max());
if (unsigned_value > upper_bound) {
return Error(loc, StrCat("value ", value,
" is out of range for literal's primitive type ",
PrimitiveType_Name(literal_ty), " namely [0, ",
upper_bound, "]."));
}
} else if (value > static_cast<ParsedElemT>(
MinMaxFiniteValue<LiteralNativeT>::max()) ||
value < static_cast<ParsedElemT>(
MinMaxFiniteValue<LiteralNativeT>::min())) {
return Error(
loc,
StrCat(
"value ", value, " is out of range for literal's primitive type ",
PrimitiveType_Name(literal_ty), " namely [",
static_cast<ParsedElemT>(MinMaxFiniteValue<LiteralNativeT>::min()),
", ",
static_cast<ParsedElemT>(MinMaxFiniteValue<LiteralNativeT>::max()),
"]."));
}
return true;
}
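// Overload for complex literals: checks the real and imaginary components
// independently against the component type's finite range.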
template <typename LiteralNativeT>
bool HloParserImpl::CheckParsedValueIsInRange(LocTy loc,
std::complex<double> value) {
using LiteralComplexComponentT =
decltype(std::real(std::declval<LiteralNativeT>()));
auto check_component = [&](absl::string_view name, double v) {
if (!std::isfinite(v)) {
return true;
}
double min = MinMaxFiniteValue<LiteralComplexComponentT>::min();
double max = MinMaxFiniteValue<LiteralComplexComponentT>::max();
if (v < min || v > max) {
return Error(
loc,
StrCat(name, " part ", v,
" is out of range for literal's primitive type ",
PrimitiveType_Name(
primitive_util::NativeToPrimitiveType<LiteralNativeT>()),
", namely [", min, ", ", max, "]."));
}
return true;
};
return check_component("real", std::real(value)) &&
check_component("imaginary", std::imag(value));
}
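// Range-checks `value` and writes it into `literal` at linear position
// `index`. NaN payloads are preserved when the target component type supports
// them and rejected otherwise.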
template <typename LiteralNativeT, typename ParsedElemT>
bool HloParserImpl::SetValueInLiteralHelper(LocTy loc, ParsedElemT value,
int64_t index, Literal* literal) {
if (!CheckParsedValueIsInRange<LiteralNativeT>(loc, value)) {
return false;
}
if (index >= ShapeUtil::ElementsIn(literal->shape())) {
return Error(loc, StrCat("tries to set value ", StringifyValue(value),
" to a literal in shape ",
ShapeUtil::HumanString(literal->shape()),
" at linear index ", index,
", but the index is out of range"));
}
using ParsedElemComponentT = typename ComponentType<ParsedElemT>::Type;
using LiteralNativeComponentT = typename ComponentType<LiteralNativeT>::Type;
const auto handle_nan =
[this, literal, index, loc](
ParsedElemComponentT parsed_value_component,
LiteralNativeComponentT* literal_value_component) {
if (!std::isnan(static_cast<double>(parsed_value_component))) {
return true;
}
auto nan_payload = GetNanPayload(parsed_value_component);
if constexpr (NanPayloadBits<LiteralNativeComponentT>() > 0) {
if (nan_payload == QuietNanWithoutPayload<double>()) {
nan_payload = QuietNanWithoutPayload<LiteralNativeComponentT>();
}
const auto kLargestPayload =
NanPayloadBitMask<LiteralNativeComponentT>();
if (nan_payload > kLargestPayload) {
return Error(
loc, StrCat("tries to set NaN payload 0x",
absl::Hex(nan_payload), " to a literal in shape ",
ShapeUtil::HumanString(literal->shape()),
" at linear index ", index,
", but the NaN payload is out of range (0x",
absl::Hex(kLargestPayload), ")"));
}
*literal_value_component =
NanWithSignAndPayload<LiteralNativeComponentT>(
std::signbit(
static_cast<double>(parsed_value_component)),
nan_payload);
} else {
if (nan_payload != QuietNanWithoutPayload<double>()) {
return Error(
loc, StrCat("tries to set NaN payload 0x",
absl::Hex(nan_payload), " to a literal in shape ",
ShapeUtil::HumanString(literal->shape()),
" at linear index ", index, ", but ",
primitive_util::LowercasePrimitiveTypeName(
literal->shape().element_type()),
" does not support payloads"));
}
}
return true;
};
const ParsedElemComponentT parsed_real_value = GetReal(value);
auto literal_real_value =
static_cast<LiteralNativeComponentT>(parsed_real_value);
if (std::is_floating_point_v<ParsedElemT> ||
std::is_same_v<ParsedElemT, std::complex<double>>) {
if (!handle_nan(parsed_real_value, &literal_real_value)) {
return false;
}
}
const ParsedElemComponentT parsed_imag_value = GetImag(value);
auto literal_imag_value =
static_cast<LiteralNativeComponentT>(parsed_imag_value);
if constexpr (std::is_same_v<ParsedElemT, std::complex<double>>) {
    if (!handle_nan(parsed_imag_value, &literal_imag_value)) {
return false;
}
}
literal->data<LiteralNativeT>().at(index) =
LiteralNativeFromRealImag<LiteralNativeT>(literal_real_value,
literal_imag_value);
return true;
}
bool HloParserImpl::SetValueInLiteral(LocTy loc, int64_t value, int64_t index,
Literal* literal) {
const Shape& shape = literal->shape();
return primitive_util::PrimitiveTypeSwitch<bool>(
[&](auto primitive_type_constant) -> bool {
if constexpr (primitive_type_constant == PRED) {
return SetValueInLiteralHelper<bool>(loc, static_cast<bool>(value),
index, literal);
}
if constexpr (primitive_util::IsIntegralType(primitive_type_constant)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type_constant>;
return SetValueInLiteralHelper<NativeT>(loc, value, index, literal);
}
LOG(FATAL) << "unknown integral primitive type "
<< PrimitiveType_Name(shape.element_type());
},
shape.element_type());
}
bool HloParserImpl::SetValueInLiteral(LocTy loc, double value, int64_t index,
Literal* literal) {
const Shape& shape = literal->shape();
return primitive_util::PrimitiveTypeSwitch<bool>(
[&](auto primitive_type_constant) -> bool {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type_constant>;
return SetValueInLiteralHelper<NativeT>(loc, value, index, literal);
}
LOG(FATAL) << "unknown floating point primitive type "
<< PrimitiveType_Name(shape.element_type());
},
shape.element_type());
}
bool HloParserImpl::SetValueInLiteral(LocTy loc, bool value, int64_t index,
Literal* literal) {
const Shape& shape = literal->shape();
switch (shape.element_type()) {
case PRED:
return SetValueInLiteralHelper<bool>(loc, value, index, literal);
default:
LOG(FATAL) << PrimitiveType_Name(shape.element_type())
<< " is not PRED type";
}
}
bool HloParserImpl::SetValueInLiteral(LocTy loc, std::complex<double> value,
int64_t index, Literal* literal) {
const Shape& shape = literal->shape();
return primitive_util::PrimitiveTypeSwitch<bool>(
[&](auto primitive_type_constant) -> bool {
if constexpr (primitive_util::IsComplexType(primitive_type_constant)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type_constant>;
return SetValueInLiteralHelper<NativeT>(loc, value, index, literal);
}
LOG(FATAL) << PrimitiveType_Name(shape.element_type())
<< " is not a complex type";
},
shape.element_type());
}
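// Parses a literal. A '(' starts a tuple literal (a comma-separated list of
// literals); otherwise a shape is parsed first, followed by the literal body.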
bool HloParserImpl::ParseLiteral(Literal* literal) {
if (lexer_.GetKind() == TokKind::kLparen) {
lexer_.Lex();
std::vector<Literal> elements;
while (lexer_.GetKind() != TokKind::kRparen) {
Literal element;
if (!ParseLiteral(&element)) {
return TokenError("Fails when parsing tuple element");
}
elements.emplace_back(std::move(element));
if (lexer_.GetKind() != TokKind::kRparen) {
ParseToken(TokKind::kComma, "expects ',' to separate tuple elements");
}
}
*literal = LiteralUtil::MakeTupleOwned(std::move(elements));
return ParseToken(TokKind::kRparen, "expects ')' to close a tuple literal");
}
Shape literal_shape;
if (!ParseShape(&literal_shape)) {
return false;
}
return ParseLiteral(literal, literal_shape);
}
bool HloParserImpl::ParseLiteral(Literal* literal, const Shape& shape) {
return shape.IsTuple() ? ParseTupleLiteral(literal, shape)
: ParseNonTupleLiteral(literal, shape);
}
bool HloParserImpl::ParseTupleLiteral(Literal* literal, const Shape& shape) {
if (!ParseToken(TokKind::kLparen, "expects '(' in front of tuple elements")) {
return false;
}
std::vector<Literal> elements(ShapeUtil::TupleElementCount(shape));
  if (lexer_.GetKind() == TokKind::kRparen) {
    // Empty tuple.
  } else {
for (int i = 0; i < elements.size(); i++) {
if (i > 0) {
ParseToken(TokKind::kComma, "expects ',' to separate tuple elements");
}
if (!ParseLiteral(&elements[i],
ShapeUtil::GetTupleElementShape(shape, i))) {
return TokenError(StrCat("expects the ", i, "th element"));
}
}
}
*literal = LiteralUtil::MakeTupleOwned(std::move(elements));
return ParseToken(TokKind::kRparen,
StrCat("expects ')' at the end of the tuple with ",
ShapeUtil::TupleElementCount(shape), "elements"));
}
bool HloParserImpl::ParseNonTupleLiteral(Literal* literal, const Shape& shape) {
CHECK(LayoutUtil::IsDenseArray(shape)) << shape.ToString(true);
return ParseDenseLiteral(literal, shape);
}
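// Parses a dense array literal of the given shape, tracking brace nesting and
// per-dimension element counts so mismatches are reported precisely. The
// `...` token marks the rest of the elements and fills the buffer with
// deterministic synthetic values.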
bool HloParserImpl::ParseDenseLiteral(Literal* literal, const Shape& shape) {
const int rank = static_cast<int>(shape.rank());
*literal = LiteralUtil::CreateFromDimensions(shape.element_type(),
shape.dimensions());
int64_t nest_level = 0;
int64_t linear_index = 0;
std::vector<int64_t> elems_seen_per_dim(rank);
auto get_index_str = [&elems_seen_per_dim](int dim) -> std::string {
std::vector<int64_t> elems_seen_until_dim(elems_seen_per_dim.begin(),
elems_seen_per_dim.begin() + dim);
return StrCat("[",
StrJoin(elems_seen_until_dim, ",",
[](std::string* out, const int64_t num_elems) {
StrAppend(out, num_elems - 1);
}),
"]");
};
auto add_one_elem_seen = [&] {
if (rank > 0) {
if (nest_level != rank) {
return TokenError(absl::StrFormat(
"expects nested array in rank %d, but sees %d", rank, nest_level));
}
elems_seen_per_dim[rank - 1]++;
if (elems_seen_per_dim[rank - 1] > shape.dimensions(rank - 1)) {
return TokenError(absl::StrFormat(
"expects %d elements on the minor-most dimension, but "
"sees more",
shape.dimensions(rank - 1)));
}
}
return true;
};
do {
switch (lexer_.GetKind()) {
default:
return TokenError("unexpected token type in a literal");
case TokKind::kLbrace: {
nest_level++;
if (nest_level > rank) {
return TokenError(absl::StrFormat(
"expects nested array in rank %d, but sees larger", rank));
}
if (nest_level > 1) {
elems_seen_per_dim[nest_level - 2]++;
if (elems_seen_per_dim[nest_level - 2] >
shape.dimensions(nest_level - 2)) {
return TokenError(absl::StrFormat(
"expects %d elements in the %sth element, but sees more",
shape.dimensions(nest_level - 2),
get_index_str(nest_level - 2)));
}
}
lexer_.Lex();
break;
}
case TokKind::kRbrace: {
if (nest_level == 0) {
return TokenError("unexpected '}' token");
}
nest_level--;
if (elems_seen_per_dim[nest_level] != shape.dimensions(nest_level)) {
return TokenError(absl::StrFormat(
"expects %d elements in the %sth element, but sees %d",
shape.dimensions(nest_level), get_index_str(nest_level),
elems_seen_per_dim[nest_level]));
}
elems_seen_per_dim[nest_level] = 0;
lexer_.Lex();
break;
}
case TokKind::kLparen: {
if (!primitive_util::IsComplexType(shape.element_type())) {
return TokenError(
absl::StrFormat("unexpected '(' in literal. Parens are only "
"valid for complex literals"));
}
std::complex<double> value;
LocTy loc = lexer_.GetLoc();
if (!add_one_elem_seen() || !ParseComplex(&value) ||
!SetValueInLiteral(loc, value, linear_index++, literal)) {
return false;
}
break;
}
case TokKind::kDots: {
if (nest_level != 1) {
return TokenError(absl::StrFormat(
"expects `...` at nest level 1, but sees it at nest level %d",
nest_level));
}
elems_seen_per_dim[0] = shape.dimensions(0);
lexer_.Lex();
static uint32_t data = 0;
static_assert(sizeof(bool) == 1);
constexpr uint32_t kBooleanMask = 0x01010101;
constexpr uint32_t kNoMask = 0xFFFFFFFF;
const uint32_t mask =
(shape.element_type() == PRED) ? kBooleanMask : kNoMask;
uint32_t* raw_data = static_cast<uint32_t*>(literal->untyped_data());
for (int64_t i = 0; i < literal->size_bytes() / 4; ++i) {
raw_data[i] = data++ & mask;
}
uint8_t* raw_data_int8 = static_cast<uint8_t*>(literal->untyped_data());
static uint8_t data_int8 = 0;
for (int64_t i = 0; i < literal->size_bytes() % 4; ++i) {
raw_data_int8[literal->size_bytes() / 4 + i] = data_int8++ & mask;
}
break;
}
case TokKind::kComma:
lexer_.Lex();
break;
case TokKind::kw_true:
case TokKind::kw_false:
case TokKind::kInt:
case TokKind::kDecimal:
case TokKind::kw_inf:
case TokKind::kNegInf: {
add_one_elem_seen();
if (lexer_.GetKind() == TokKind::kw_true ||
lexer_.GetKind() == TokKind::kw_false) {
if (!SetValueInLiteral(lexer_.GetLoc(),
lexer_.GetKind() == TokKind::kw_true,
linear_index++, literal)) {
return false;
}
lexer_.Lex();
} else if (primitive_util::IsIntegralType(shape.element_type()) ||
shape.element_type() == PRED) {
LocTy loc = lexer_.GetLoc();
int64_t value;
if (!ParseInt64(&value)) {
return Error(loc, StrCat("expects integer for primitive type: ",
PrimitiveType_Name(shape.element_type())));
}
if (!SetValueInLiteral(loc, value, linear_index++, literal)) {
return false;
}
} else if (primitive_util::IsFloatingPointType(shape.element_type())) {
LocTy loc = lexer_.GetLoc();
double value;
if (!ParseDouble(&value)) {
return Error(
loc, StrCat("expect floating point value for primitive type: ",
PrimitiveType_Name(shape.element_type())));
}
if (!SetValueInLiteral(loc, value, linear_index++, literal)) {
return false;
}
} else {
return TokenError(StrCat("unsupported primitive type ",
PrimitiveType_Name(shape.element_type())));
}
break;
}
}
} while (nest_level > 0);
*literal = literal->Relayout(shape.layout());
return true;
}
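// Parses the operand list of an instruction:
//   operands ::= '(' (operand (',' operand)*)? ')'
// where each operand is either a (possibly shape-qualified) instruction name
// or a nested instruction; the parser backtracks between the two forms.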
bool HloParserImpl::ParseOperands(std::vector<HloInstruction*>* operands,
HloComputation::Builder* builder) {
CHECK(operands != nullptr);
if (!ParseToken(TokKind::kLparen,
"expects '(' at the beginning of operands")) {
return false;
}
  if (lexer_.GetKind() == TokKind::kRparen) {
    // Empty operand list.
  } else {
do {
HloLexer lexer_copy = lexer_;
std::vector<std::string> saved_errors;
std::swap(saved_errors, error_);
bool is_normal_operand = [&] {
LocTy loc = lexer_.GetLoc();
std::string name;
optional<Shape> shape;
if (CanBeShape()) {
shape.emplace();
if (!ParseShape(&shape.value())) {
return false;
}
}
if (!ParseName(&name)) {
if (shape.has_value() && create_missing_instruction_ != nullptr &&
scoped_name_tables_.size() == 1) {
name = "";
} else {
return false;
}
}
std::pair<HloInstruction*, LocTy>* instruction =
FindInstruction(name, shape);
if (instruction == nullptr) {
return Error(loc, StrCat("instruction does not exist: ", name));
}
auto next = lexer_.GetKind();
if (next != TokKind::kComma && next != TokKind::kRparen) {
return false;
}
operands->push_back(instruction->first);
return true;
}();
if (is_normal_operand) {
error_ = std::move(saved_errors);
continue;
}
std::vector<std::string> normal_operand_errors;
std::swap(error_, normal_operand_errors);
lexer_ = lexer_copy;
LocTy loc = lexer_.GetLoc();
bool is_nested_instruction = ParseInstructionRhs(
builder, "", loc, false);
if (is_nested_instruction) {
operands->push_back(builder->last_added_instruction());
error_ = std::move(saved_errors);
continue;
}
std::vector<std::string> nested_instruction_errors;
std::swap(error_, nested_instruction_errors);
error_ = std::move(saved_errors);
Error(loc,
"cannot parse as an instruction name or as a nested instruction:");
error_.insert(error_.end(),
std::make_move_iterator(normal_operand_errors.begin()),
std::make_move_iterator(normal_operand_errors.end()));
error_.insert(error_.end(),
std::make_move_iterator(nested_instruction_errors.begin()),
std::make_move_iterator(nested_instruction_errors.end()));
} while (EatIfPresent(TokKind::kComma));
}
return ParseToken(TokKind::kRparen, "expects ')' at the end of operands");
}
bool HloParserImpl::ParseOperands(std::vector<HloInstruction*>* operands,
HloComputation::Builder* builder,
const int expected_size) {
CHECK(operands != nullptr);
LocTy loc = lexer_.GetLoc();
if (!ParseOperands(operands, builder)) {
return false;
}
if (expected_size != operands->size()) {
return Error(loc, StrCat("expects ", expected_size, " operands, but has ",
operands->size(), " operands"));
}
return true;
}
bool HloParserImpl::ParseSubAttributes(
const absl::flat_hash_map<std::string, AttrConfig>& attrs) {
LocTy loc = lexer_.GetLoc();
if (!ParseToken(TokKind::kLbrace, "expects '{' to start sub attributes")) {
return false;
}
absl::flat_hash_set<std::string> seen_attrs;
  if (lexer_.GetKind() == TokKind::kRbrace) {
    // No sub-attributes.
  } else {
do {
EatIfPresent(TokKind::kComma);
if (!ParseAttributeHelper(attrs, &seen_attrs)) {
return false;
}
} while (lexer_.GetKind() != TokKind::kRbrace);
}
for (const auto& attr_it : attrs) {
if (attr_it.second.required &&
seen_attrs.find(attr_it.first) == seen_attrs.end()) {
return Error(loc, StrFormat("sub-attribute %s is expected but not seen",
attr_it.first));
}
}
return ParseToken(TokKind::kRbrace, "expects '}' to end sub attributes");
}
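// attributes ::= (',' attribute)*
// Consumes attributes only when `allow_attributes` is true, then errors if any
// required attribute in `attrs` was not seen.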
bool HloParserImpl::ParseAttributes(
const absl::flat_hash_map<std::string, AttrConfig>& attrs,
bool allow_attributes, const std::optional<Shape>& shape) {
LocTy loc = lexer_.GetLoc();
absl::flat_hash_set<std::string> seen_attrs;
if (allow_attributes) {
while (EatIfPresent(TokKind::kComma)) {
if (!ParseAttributeHelper(attrs, &seen_attrs, shape)) {
return false;
}
}
}
for (const auto& attr_it : attrs) {
if (attr_it.second.required &&
seen_attrs.find(attr_it.first) == seen_attrs.end()) {
return Error(loc, StrFormat("attribute %s is expected but not seen",
attr_it.first));
}
}
return true;
}
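// Parses a single 'name=value' attribute, dispatching on the declared AttrTy
// to parse the value and store it in the attribute's result slot.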
bool HloParserImpl::ParseAttributeHelper(
const absl::flat_hash_map<std::string, AttrConfig>& attrs,
absl::flat_hash_set<std::string>* seen_attrs,
const std::optional<Shape>& shape) {
LocTy loc = lexer_.GetLoc();
std::string name;
if (!ParseAttributeName(&name)) {
return Error(loc, "error parsing attributes");
}
VLOG(kDebugLevel) << "Parsing attribute " << name;
if (!seen_attrs->insert(name).second) {
return Error(loc, StrFormat("attribute %s already exists", name));
}
auto attr_it = attrs.find(name);
if (attr_it == attrs.end()) {
std::string allowed_attrs;
if (attrs.empty()) {
allowed_attrs = "No attributes are allowed here.";
} else {
allowed_attrs =
StrCat("Allowed attributes: ",
StrJoin(attrs, ", ",
[&](std::string* out,
const std::pair<std::string, AttrConfig>& kv) {
StrAppend(out, kv.first);
}));
}
return Error(
loc, StrFormat("unexpected attribute \"%s\". %s", name, allowed_attrs));
}
AttrTy attr_type = attr_it->second.attr_type;
void* attr_out_ptr = attr_it->second.result;
bool success = [&] {
LocTy attr_loc = lexer_.GetLoc();
switch (attr_type) {
case AttrTy::kBool: {
bool result;
if (!ParseBool(&result)) {
return false;
}
static_cast<optional<bool>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kBracedBoolListOrBool: {
if (!ParseBooleanListOrSingleBoolean(
static_cast<BoolList*>(attr_out_ptr))) {
return false;
}
return true;
}
case AttrTy::kInt64: {
int64_t result;
if (!ParseInt64(&result)) {
return false;
}
static_cast<optional<int64_t>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kInt32: {
int64_t result;
if (!ParseInt64(&result)) {
return false;
}
if (result != static_cast<int32_t>(result)) {
return Error(attr_loc, "value out of range for int32_t");
}
static_cast<optional<int32_t>*>(attr_out_ptr)
->emplace(static_cast<int32_t>(result));
return true;
}
case AttrTy::kFloat: {
double result;
if (!ParseDouble(&result)) {
return false;
}
if (result > std::numeric_limits<float>::max() ||
result < std::numeric_limits<float>::lowest()) {
return Error(attr_loc, "value out of range for float");
}
static_cast<optional<float>*>(attr_out_ptr)
->emplace(static_cast<float>(result));
return true;
}
case AttrTy::kHloComputation: {
HloComputation* result = nullptr;
if (!ParseHloComputation(&result)) {
return false;
}
static_cast<optional<HloComputation*>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kBracedHloComputationList: {
std::vector<HloComputation*> result;
if (!ParseHloComputationList(&result)) {
return false;
}
static_cast<optional<std::vector<HloComputation*>>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kFftType: {
FftType result;
if (!ParseFftType(&result)) {
return false;
}
static_cast<optional<FftType>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kPaddingType: {
PaddingType result;
if (!ParsePaddingType(&result)) {
return false;
}
static_cast<optional<PaddingType>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kComparisonDirection: {
ComparisonDirection result;
if (!ParseComparisonDirection(&result)) {
return false;
}
static_cast<optional<ComparisonDirection>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kComparisonType: {
Comparison::Type result;
if (!ParseComparisonType(&result)) {
return false;
}
static_cast<optional<Comparison::Type>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kEnum: {
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects an enumeration value");
}
std::string result = lexer_.GetStrVal();
lexer_.Lex();
static_cast<optional<std::string>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kWindow: {
Window result;
        if (!ParseWindow(&result, /*expect_outer_curlies=*/true)) {
return false;
}
static_cast<optional<Window>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kConvolutionDimensionNumbers: {
ConvolutionDimensionNumbers result;
if (!ParseConvolutionDimensionNumbers(&result)) {
return false;
}
static_cast<optional<ConvolutionDimensionNumbers>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kSharding: {
std::optional<HloSharding> sharding;
if (!ParseSharding(sharding)) {
return false;
}
static_cast<optional<HloSharding>*>(attr_out_ptr)
->emplace(std::move(*sharding));
return true;
}
case AttrTy::kCollectiveDeviceList: {
CollectiveDeviceList device_list;
if (!ParseCollectiveDeviceList(&device_list)) {
return false;
}
*(static_cast<CollectiveDeviceList*>(attr_out_ptr)) = device_list;
return true;
}
case AttrTy::kFrontendAttributes: {
FrontendAttributes frontend_attributes;
if (!ParseFrontendAttributes(&frontend_attributes)) {
return false;
}
static_cast<optional<FrontendAttributes>*>(attr_out_ptr)
->emplace(frontend_attributes);
return true;
}
case AttrTy::kStatisticsViz: {
StatisticsViz statistics_viz;
if (!ParseStatisticsViz(&statistics_viz)) {
return false;
}
static_cast<optional<StatisticsViz>*>(attr_out_ptr)
->emplace(statistics_viz);
return true;
}
case AttrTy::kParameterReplication: {
ParameterReplication parameter_replication;
if (!ParseParameterReplication(¶meter_replication)) {
return false;
}
static_cast<optional<ParameterReplication>*>(attr_out_ptr)
->emplace(parameter_replication);
return true;
}
case AttrTy::kInstructionList: {
std::vector<HloInstruction*> result;
if (!ParseInstructionNames(&result)) {
return false;
}
static_cast<optional<std::vector<HloInstruction*>>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kFusionKind: {
HloInstruction::FusionKind result;
if (!ParseFusionKind(&result)) {
return false;
}
static_cast<optional<HloInstruction::FusionKind>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kBracedInt64List: {
std::vector<int64_t> result;
if (!ParseInt64List(TokKind::kLbrace, TokKind::kRbrace, TokKind::kComma,
&result)) {
return false;
}
static_cast<optional<std::vector<int64_t>>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kBracedInt64ListList: {
std::vector<std::vector<int64_t>> result;
if (!ParseInt64ListList(TokKind::kLbrace, TokKind::kRbrace,
TokKind::kComma, &result)) {
return false;
}
static_cast<optional<std::vector<std::vector<int64_t>>>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kSliceRanges: {
SliceRanges result;
if (!ParseSliceRanges(&result)) {
return false;
}
static_cast<optional<SliceRanges>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kPaddingConfig: {
PaddingConfig result;
if (!ParsePaddingConfig(&result)) {
return false;
}
static_cast<optional<PaddingConfig>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kString: {
std::string result;
if (!ParseString(&result)) {
return false;
}
static_cast<optional<std::string>*>(attr_out_ptr)
->emplace(std::move(result));
return true;
}
case AttrTy::kStringOrJsonDict: {
std::string result;
if (lexer_.GetKind() == TokKind::kString) {
if (!ParseString(&result)) {
return false;
}
} else if (lexer_.GetKind() == TokKind::kLbrace) {
if (!ParseJsonDict(&result)) {
return false;
}
} else {
return false;
}
static_cast<optional<std::string>*>(attr_out_ptr)
->emplace(std::move(result));
return true;
}
case AttrTy::kOriginalValue: {
if (!shape) {
return TokenError("expects instruction shape");
}
return ParseOriginalValue(
static_cast<optional<std::shared_ptr<OriginalValue>>*>(
attr_out_ptr),
*shape);
}
case AttrTy::kMetadata: {
OpMetadata result;
if (!ParseMetadata(result)) {
return false;
}
static_cast<optional<OpMetadata>*>(attr_out_ptr)
->emplace(std::move(result));
return true;
}
case AttrTy::kDistribution: {
RandomDistribution result;
if (!ParseRandomDistribution(&result)) {
return false;
}
static_cast<optional<RandomDistribution>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kDomain: {
return ParseDomain(static_cast<DomainData*>(attr_out_ptr));
}
case AttrTy::kPrecisionList: {
std::vector<PrecisionConfig::Precision> result;
if (!ParsePrecisionList(&result)) {
return false;
}
static_cast<optional<std::vector<PrecisionConfig::Precision>>*>(
attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kShape: {
Shape result;
if (!ParseShape(&result)) {
return false;
}
static_cast<optional<Shape>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kShapeList: {
std::vector<Shape> result;
if (!ParseShapeList(&result)) {
return false;
}
static_cast<optional<std::vector<Shape>>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kRandomAlgorithm: {
RandomAlgorithm result;
if (!ParseRandomAlgorithm(&result)) {
return false;
}
static_cast<optional<RandomAlgorithm>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kPrecisionAlgorithm: {
PrecisionConfig::Algorithm result;
if (!ParseAlgorithm(&result)) {
return false;
}
static_cast<optional<PrecisionConfig::Algorithm>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kAliasing: {
AliasingData aliasing_data;
if (!ParseAliasing(&aliasing_data)) {
return false;
}
static_cast<optional<AliasingData>*>(attr_out_ptr)
->emplace(aliasing_data);
return true;
}
case AttrTy::kBufferDonor: {
BufferDonor buffer_donor;
if (!ParseBufferDonor(&buffer_donor)) {
return false;
}
static_cast<optional<BufferDonor>*>(attr_out_ptr)
->emplace(buffer_donor);
return true;
}
case AttrTy::kComputationLayout: {
ComputationLayout computation_layout(ShapeLayout(Shape{}));
if (!ParseComputationLayout(&computation_layout)) {
return false;
}
static_cast<optional<ComputationLayout>*>(attr_out_ptr)
->emplace(computation_layout);
return true;
}
case AttrTy::kInstructionAliasing: {
std::vector<std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
aliasing_output_operand_pairs;
if (!ParseInstructionOutputOperandAliasing(
&aliasing_output_operand_pairs)) {
return false;
}
static_cast<optional<std::vector<
std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>>*>(
attr_out_ptr)
->emplace(std::move(aliasing_output_operand_pairs));
return true;
}
case AttrTy::kLiteral: {
Literal result;
if (!ParseLiteral(&result)) {
return false;
}
static_cast<optional<Literal>*>(attr_out_ptr)
->emplace(std::move(result));
return true;
}
case AttrTy::kCustomCallSchedule: {
CustomCallSchedule result;
if (!ParseCustomCallSchedule(&result)) {
return false;
}
static_cast<optional<CustomCallSchedule>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kCustomCallApiVersion: {
CustomCallApiVersion result;
if (!ParseCustomCallApiVersion(&result)) {
return false;
}
static_cast<optional<CustomCallApiVersion>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kSparsityDescriptor: {
std::vector<SparsityDescriptor> result;
if (!ParseSparsityDescriptor(&result)) {
return false;
}
*static_cast<std::vector<SparsityDescriptor>*>(attr_out_ptr) =
std::move(result);
return true;
}
}
}();
if (!success) {
return Error(loc, StrFormat("error parsing attribute %s", name));
}
return true;
}
bool HloParserImpl::CopyAttributeToProtoMessage(
absl::flat_hash_set<std::string> non_proto_attrs,
const absl::flat_hash_map<std::string, AttrConfig>& attrs,
tsl::protobuf::Message* message) {
const tsl::protobuf::Descriptor* descriptor = message->GetDescriptor();
const tsl::protobuf::Reflection* reflection = message->GetReflection();
for (const auto& p : attrs) {
const std::string& name = p.first;
if (non_proto_attrs.find(name) != non_proto_attrs.end()) {
continue;
}
const tsl::protobuf::FieldDescriptor* fd =
descriptor->FindFieldByName(name);
if (!fd) {
std::string allowed_attrs = "Allowed attributes: ";
for (int i = 0; i < descriptor->field_count(); ++i) {
if (i == 0) {
absl::StrAppend(&allowed_attrs, descriptor->field(i)->name());
} else {
absl::StrAppend(&allowed_attrs, ", ", descriptor->field(i)->name());
}
}
return TokenError(
StrFormat("unexpected attribute \"%s\". %s", name, allowed_attrs));
}
CHECK(!fd->is_repeated());
bool success = [&] {
switch (fd->type()) {
case tsl::protobuf::FieldDescriptor::TYPE_BOOL: {
auto attr_value = static_cast<optional<bool>*>(p.second.result);
if (attr_value->has_value()) {
reflection->SetBool(message, fd, **attr_value);
}
return true;
}
case tsl::protobuf::FieldDescriptor::TYPE_ENUM: {
auto attr_value =
static_cast<optional<std::string>*>(p.second.result);
if (attr_value->has_value()) {
const tsl::protobuf::EnumValueDescriptor* evd =
fd->enum_type()->FindValueByName(**attr_value);
reflection->SetEnum(message, fd, evd);
}
return true;
}
default:
return false;
}
}();
if (!success) {
return TokenError(StrFormat("error parsing attribute %s", name));
}
}
return true;
}
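// Builds an attribute table from the bool and enum fields of `message`, merges
// in `non_proto_attrs`, parses the attributes, and copies the proto-backed
// values into `message`.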
bool HloParserImpl::ParseAttributesAsProtoMessage(
const absl::flat_hash_map<std::string, AttrConfig>& non_proto_attrs,
tsl::protobuf::Message* message) {
const tsl::protobuf::Descriptor* descriptor = message->GetDescriptor();
absl::flat_hash_map<std::string, AttrConfig> attrs;
std::vector<optional<bool>> bool_params;
std::vector<optional<std::string>> string_params;
bool_params.reserve(descriptor->field_count());
string_params.reserve(descriptor->field_count());
for (int field_idx = 0; field_idx < descriptor->field_count(); field_idx++) {
const tsl::protobuf::FieldDescriptor* fd = descriptor->field(field_idx);
absl::string_view field_name = fd->name();
switch (fd->type()) {
case tsl::protobuf::FieldDescriptor::TYPE_BOOL: {
bool_params.emplace_back(std::nullopt);
        attrs[field_name] = {/*required=*/false, AttrTy::kBool,
                             &bool_params.back()};
break;
}
case tsl::protobuf::FieldDescriptor::TYPE_ENUM: {
string_params.emplace_back(std::nullopt);
        attrs[field_name] = {/*required=*/false, AttrTy::kEnum,
                             &string_params.back()};
break;
}
default:
return TokenError(absl::StrFormat(
"Unexpected protocol buffer type: %s ", fd->DebugString()));
}
}
absl::flat_hash_set<std::string> non_proto_attrs_names;
non_proto_attrs_names.reserve(non_proto_attrs.size());
for (const auto& p : non_proto_attrs) {
const std::string& attr_name = p.first;
if (attrs.find(attr_name) == attrs.end()) {
non_proto_attrs_names.insert(attr_name);
attrs[attr_name] = p.second;
}
}
if (!ParseAttributes(attrs)) {
return false;
}
return CopyAttributeToProtoMessage(non_proto_attrs_names, attrs, message);
}
bool HloParserImpl::ParseComputationName(HloComputation** value) {
std::string name;
LocTy loc = lexer_.GetLoc();
if (!ParseName(&name)) {
return Error(loc, "expects computation name");
}
std::pair<HloComputation*, LocTy>* computation =
tsl::gtl::FindOrNull(computation_pool_, name);
if (computation == nullptr) {
return Error(loc, StrCat("computation does not exist: ", name));
}
*value = computation->first;
return true;
}
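// window ::= '{' (size= | stride= | pad= | lhs_dilate= | rhs_dilate= |
//                 rhs_reversal=)* '}'
// Sub-attributes may appear in any order; the optional ones must have the same
// length as 'size='.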
bool HloParserImpl::ParseWindow(Window* window, bool expect_outer_curlies) {
LocTy loc = lexer_.GetLoc();
if (expect_outer_curlies &&
!ParseToken(TokKind::kLbrace, "expected '{' to start window attribute")) {
return false;
}
std::vector<int64_t> size;
std::vector<int64_t> stride;
std::vector<std::vector<int64_t>> pad;
std::vector<int64_t> lhs_dilate;
std::vector<int64_t> rhs_dilate;
std::vector<int64_t> rhs_reversal;
const auto end_token =
expect_outer_curlies ? TokKind::kRbrace : TokKind::kEof;
while (lexer_.GetKind() != end_token) {
LocTy attr_loc = lexer_.GetLoc();
std::string field_name;
if (!ParseAttributeName(&field_name)) {
return Error(attr_loc, "expects sub-attributes in window");
}
bool ok = [&] {
if (field_name == "size") {
return ParseDxD("size", &size);
}
if (field_name == "stride") {
return ParseDxD("stride", &stride);
}
if (field_name == "lhs_dilate") {
return ParseDxD("lhs_dilate", &lhs_dilate);
}
if (field_name == "rhs_dilate") {
return ParseDxD("rls_dilate", &rhs_dilate);
}
if (field_name == "pad") {
return ParseWindowPad(&pad);
}
if (field_name == "rhs_reversal") {
return ParseDxD("rhs_reversal", &rhs_reversal);
}
return Error(attr_loc, StrCat("unexpected attribute name: ", field_name));
}();
if (!ok) {
return false;
}
}
if (!stride.empty() && stride.size() != size.size()) {
return Error(loc, "expects 'stride=' has the same size as 'size='");
}
if (!lhs_dilate.empty() && lhs_dilate.size() != size.size()) {
return Error(loc, "expects 'lhs_dilate=' has the same size as 'size='");
}
if (!rhs_dilate.empty() && rhs_dilate.size() != size.size()) {
return Error(loc, "expects 'rhs_dilate=' has the same size as 'size='");
}
if (!pad.empty() && pad.size() != size.size()) {
return Error(loc, "expects 'pad=' has the same size as 'size='");
}
for (int i = 0; i < size.size(); i++) {
window->add_dimensions()->set_size(size[i]);
if (!pad.empty()) {
window->mutable_dimensions(i)->set_padding_low(pad[i][0]);
window->mutable_dimensions(i)->set_padding_high(pad[i][1]);
}
window->mutable_dimensions(i)->set_stride(stride.empty() ? 1 : stride[i]);
window->mutable_dimensions(i)->set_base_dilation(
lhs_dilate.empty() ? 1 : lhs_dilate[i]);
window->mutable_dimensions(i)->set_window_dilation(
rhs_dilate.empty() ? 1 : rhs_dilate[i]);
window->mutable_dimensions(i)->set_window_reversal(
rhs_reversal.empty() ? false : (rhs_reversal[i] == 1));
}
return !expect_outer_curlies ||
ParseToken(TokKind::kRbrace, "expected '}' to end window attribute");
}
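// Parses convolution dimension numbers from a dim-labels pattern such as
// 'bf0_0io->0bf', filling in the batch, feature, and spatial dimensions of the
// lhs, kernel, and output.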
bool HloParserImpl::ParseConvolutionDimensionNumbers(
ConvolutionDimensionNumbers* dnums) {
if (lexer_.GetKind() != TokKind::kDimLabels) {
return TokenError("expects dim labels pattern, e.g., 'bf0_0io->0bf'");
}
std::string str = lexer_.GetStrVal();
std::vector<std::string> split1 = absl::StrSplit(str, '_');
if (split1.size() != 2) {
LOG(FATAL) << "expects 3 items: lhs, rhs, and output dims, but sees "
<< str;
}
std::vector<std::string> split2 = absl::StrSplit(split1[1], "->");
if (split2.size() != 2) {
LOG(FATAL) << "expects 3 items: lhs, rhs, and output dims, but sees "
<< str;
}
absl::string_view lhs = split1[0];
absl::string_view rhs = split2[0];
absl::string_view out = split2[1];
auto is_unique = [](absl::string_view str) -> bool {
absl::flat_hash_set<char> chars;
for (char c : str) {
if (c == '?') {
continue;
}
if (!chars.insert(c).second) {
return false;
}
}
return true;
};
{
if (!is_unique(lhs)) {
return TokenError(
StrCat("expects unique lhs dimension numbers, but sees ", lhs));
}
for (char c : lhs) {
if (c != 'b' && c != 'f' && c != '?') {
dnums->add_input_spatial_dimensions(-1);
}
}
for (int i = 0; i < lhs.size(); i++) {
char c = lhs[i];
if (c == '?') {
continue;
} else if (c == 'b') {
dnums->set_input_batch_dimension(i);
} else if (c == 'f') {
dnums->set_input_feature_dimension(i);
} else if (c < '0' + lhs.size() && c >= '0') {
dnums->set_input_spatial_dimensions(c - '0', i);
} else {
return TokenError(StrFormat(
"expects [0-%dbf?] in lhs dimension numbers", lhs.size() - 1));
}
}
}
{
if (!is_unique(rhs)) {
return TokenError(
StrCat("expects unique rhs dimension numbers, but sees ", rhs));
}
for (char c : rhs) {
if (c != 'i' && c != 'o' && c != '?') {
dnums->add_kernel_spatial_dimensions(-1);
}
}
for (int i = 0; i < rhs.size(); i++) {
char c = rhs[i];
if (c == '?') {
continue;
} else if (c == 'i') {
dnums->set_kernel_input_feature_dimension(i);
} else if (c == 'o') {
dnums->set_kernel_output_feature_dimension(i);
} else if (c < '0' + rhs.size() && c >= '0') {
dnums->set_kernel_spatial_dimensions(c - '0', i);
} else {
return TokenError(StrFormat(
"expects [0-%dio?] in rhs dimension numbers", rhs.size() - 1));
}
}
}
{
if (!is_unique(out)) {
return TokenError(
StrCat("expects unique output dimension numbers, but sees ", out));
}
for (char c : out) {
if (c != 'b' && c != 'f' && c != '?') {
dnums->add_output_spatial_dimensions(-1);
}
}
for (int i = 0; i < out.size(); i++) {
char c = out[i];
if (c == '?') {
continue;
} else if (c == 'b') {
dnums->set_output_batch_dimension(i);
} else if (c == 'f') {
dnums->set_output_feature_dimension(i);
} else if (c < '0' + out.size() && c >= '0') {
dnums->set_output_spatial_dimensions(c - '0', i);
} else {
return TokenError(StrFormat(
"expects [0-%dbf?] in output dimension numbers", out.size() - 1));
}
}
}
if (dnums->input_spatial_dimensions_size() !=
dnums->output_spatial_dimensions_size() ||
dnums->input_spatial_dimensions_size() !=
dnums->kernel_spatial_dimensions_size()) {
return TokenError(
StrFormat("input, kernel, and output must have same number of spatial "
"dimensions, but got %d, %d, %d, respectively.",
dnums->input_spatial_dimensions_size(),
dnums->kernel_spatial_dimensions_size(),
dnums->output_spatial_dimensions_size()));
}
lexer_.Lex();
return true;
}
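// slice_ranges ::= '{' (range (',' range)*)? '}'
// range        ::= '[' start ':' limit (':' stride)? ']'
// A missing stride defaults to 1.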
bool HloParserImpl::ParseSliceRanges(SliceRanges* result) {
if (!ParseToken(TokKind::kLbrace, "expects '{' to start ranges")) {
return false;
}
std::vector<std::vector<int64_t>> ranges;
if (lexer_.GetKind() == TokKind::kRbrace) {
return ParseToken(TokKind::kRbrace, "expects '}' to end ranges");
}
do {
LocTy loc = lexer_.GetLoc();
ranges.emplace_back();
if (!ParseInt64List(TokKind::kLsquare, TokKind::kRsquare, TokKind::kColon,
&ranges.back())) {
return false;
}
const auto& range = ranges.back();
if (range.size() != 2 && range.size() != 3) {
return Error(loc,
StrFormat("expects [start:limit:step] or [start:limit], "
"but sees %d elements.",
range.size()));
}
} while (EatIfPresent(TokKind::kComma));
for (const auto& range : ranges) {
result->starts.push_back(range[0]);
result->limits.push_back(range[1]);
result->strides.push_back(range.size() == 3 ? range[2] : 1);
}
return ParseToken(TokKind::kRbrace, "expects '}' to end ranges");
}
bool HloParserImpl::ParsePrecisionList(
std::vector<PrecisionConfig::Precision>* result) {
auto parse_and_add_item = [&]() {
PrecisionConfig::Precision item;
if (!ParsePrecision(&item)) {
return false;
}
result->push_back(item);
return true;
};
return ParseList(TokKind::kLbrace, TokKind::kRbrace, TokKind::kComma,
parse_and_add_item);
}
bool HloParserImpl::ParseHloComputation(HloComputation** result) {
if (lexer_.GetKind() == TokKind::kLbrace) {
return ParseInstructionList(result, "_");
}
return ParseComputationName(result);
}
bool HloParserImpl::ParseHloComputationList(
std::vector<HloComputation*>* result) {
auto parse_and_add_item = [&]() {
HloComputation* computation;
if (!ParseHloComputation(&computation)) {
return false;
}
VLOG(kDebugLevel) << "parsed computation " << computation->name();
result->push_back(computation);
return true;
};
return ParseList(TokKind::kLbrace, TokKind::kRbrace, TokKind::kComma,
parse_and_add_item);
}
bool HloParserImpl::ParseShapeList(std::vector<Shape>* result) {
auto parse_and_add_item = [&]() {
Shape shape;
if (!ParseShape(&shape)) {
return false;
}
result->push_back(std::move(shape));
return true;
};
return ParseList(TokKind::kLbrace, TokKind::kRbrace, TokKind::kComma,
parse_and_add_item);
}
bool HloParserImpl::ParseInt64List(const TokKind start, const TokKind end,
const TokKind delim,
std::vector<int64_t>* result) {
auto parse_and_add_item = [&]() {
int64_t i;
if (!ParseInt64(&i)) {
return false;
}
result->push_back(i);
return true;
};
return ParseList(start, end, delim, parse_and_add_item);
}
bool HloParserImpl::ParseInt64ListList(
const TokKind start, const TokKind end, const TokKind delim,
std::vector<std::vector<int64_t>>* result) {
auto parse_and_add_item = [&]() {
std::vector<int64_t> item;
if (!ParseInt64List(start, end, delim, &item)) {
return false;
}
result->push_back(item);
return true;
};
return ParseList(start, end, delim, parse_and_add_item);
}
bool HloParserImpl::ParseList(const TokKind start, const TokKind end,
const TokKind delim,
absl::FunctionRef<bool()> parse_and_add_item) {
if (!ParseToken(start, StrCat("expects a list starting with ",
TokKindToString(start)))) {
return false;
}
  if (lexer_.GetKind() == end) {
    // Empty list.
  } else {
do {
if (!parse_and_add_item()) {
return false;
}
} while (EatIfPresent(delim));
}
return ParseToken(
end, StrCat("expects a list to end with ", TokKindToString(end)));
}
bool HloParserImpl::ParseParamListToShape(Shape* shape, LocTy* shape_loc) {
if (!ParseParamList() || !ParseToken(TokKind::kArrow, "expects '->'")) {
return false;
}
*shape_loc = lexer_.GetLoc();
return ParseShape(shape);
}
bool HloParserImpl::CanBeParamListToShape() {
return lexer_.GetKind() == TokKind::kLparen;
}
bool HloParserImpl::ParseParamList() {
if (!ParseToken(TokKind::kLparen,
"expects '(' at the beginning of param list")) {
return false;
}
  if (lexer_.GetKind() == TokKind::kRparen) {
    // Empty param list.
  } else {
do {
Shape shape;
std::string name;
if (!ParseName(&name) || !ParseShape(&shape)) {
return false;
}
} while (EatIfPresent(TokKind::kComma));
}
return ParseToken(TokKind::kRparen, "expects ')' at the end of param list");
}
bool HloParserImpl::ParseDimensionSizes(std::vector<int64_t>* dimension_sizes,
std::vector<bool>* dynamic_dimensions) {
auto parse_and_add_item = [&]() {
int64_t i;
bool is_dynamic = false;
if (lexer_.GetKind() == TokKind::kQuestionMark) {
i = Shape::kUnboundedSize;
is_dynamic = true;
lexer_.Lex();
} else {
if (lexer_.GetKind() == TokKind::kLeq) {
is_dynamic = true;
lexer_.Lex();
}
if (!ParseInt64(&i)) {
return false;
}
}
dimension_sizes->push_back(i);
dynamic_dimensions->push_back(is_dynamic);
return true;
};
return ParseList(TokKind::kLsquare, TokKind::kRsquare, TokKind::kComma,
parse_and_add_item);
}
bool HloParserImpl::ParseDimLevelTypes(
absl::InlinedVector<DimLevelType, InlineRank()>* dim_level_types,
absl::InlinedVector<bool, InlineRank()>* dim_unique,
absl::InlinedVector<bool, InlineRank()>* dim_ordered) {
auto parse_and_add_item = [&]() {
if (lexer_.GetKind() == TokKind::kIdent) {
bool dim_level_type_valid = false;
DimLevelType dim_level_type;
if (lexer_.GetStrVal() == "D") {
lexer_.Lex();
dim_level_type = DIM_DENSE;
dim_level_type_valid = true;
} else if (lexer_.GetStrVal() == "C") {
lexer_.Lex();
dim_level_type = DIM_COMPRESSED;
dim_level_type_valid = true;
} else if (lexer_.GetStrVal() == "S") {
lexer_.Lex();
dim_level_type = DIM_SINGLETON;
dim_level_type_valid = true;
} else if (lexer_.GetStrVal() == "H") {
lexer_.Lex();
dim_level_type = DIM_LOOSE_COMPRESSED;
dim_level_type_valid = true;
}
if (dim_level_type_valid) {
bool new_dim_unique = true;
if (lexer_.GetKind() == TokKind::kPlus) {
new_dim_unique = false;
lexer_.Lex();
}
bool new_dim_ordered = true;
if (lexer_.GetKind() == TokKind::kTilde) {
new_dim_ordered = false;
lexer_.Lex();
}
if (!LayoutUtil::ValidateDimLevel(dim_level_type, new_dim_unique,
new_dim_ordered)) {
return Error(
lexer_.GetLoc(),
"invalid DimLevelType/unique/ordered combination in shape");
}
dim_level_types->push_back(dim_level_type);
dim_unique->push_back(new_dim_unique);
dim_ordered->push_back(new_dim_ordered);
return true;
}
}
    return Error(lexer_.GetLoc(),
                 "expected a DimLevelType abbreviation (D, C, S, or H)");
};
return ParseList(TokKind::kLparen, TokKind::kRparen, TokKind::kComma,
parse_and_add_item);
}
bool HloParserImpl::ParseTiles(std::vector<Tile>* tiles) {
auto parse_and_add_tile_dimension = [&]() {
int64_t i;
if (ParseInt64(&i)) {
tiles->back().add_dimensions(i);
return true;
}
if (lexer_.GetKind() == TokKind::kAsterisk) {
tiles->back().add_dimensions(Tile::kCombineDimension);
lexer_.Lex();
return true;
}
return false;
};
do {
tiles->push_back(Tile());
if (!ParseList(TokKind::kLparen, TokKind::kRparen, TokKind::kComma,
parse_and_add_tile_dimension)) {
return false;
}
} while (lexer_.GetKind() == TokKind::kLparen);
return true;
}
bool HloParserImpl::ParsePhysicalShape(Shape* physical_shape) {
if (!ParseToken(TokKind::kLparen,
StrCat("expects physical shape to start with ",
TokKindToString(TokKind::kLparen)))) {
return false;
}
ParseShape(physical_shape);
if (!ParseToken(TokKind::kRparen,
StrCat("expects physical shape to end with ",
TokKindToString(TokKind::kRparen)))) {
return false;
}
return true;
}
bool HloParserImpl::ParsePrimitiveType(PrimitiveType* result) {
if (lexer_.GetKind() != TokKind::kPrimitiveType) {
return TokenError(absl::StrCat("expected primitive type, saw ",
TokKindToString(lexer_.GetKind())));
}
*result = lexer_.GetPrimitiveTypeVal();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseUnsignedIntegerType(PrimitiveType* primitive_type) {
if (!ParsePrimitiveType(primitive_type)) {
return false;
}
if (!primitive_util::IsUnsignedIntegralType(*primitive_type)) {
return TokenError("expecting an unsigned integer type");
}
return true;
}
bool HloParserImpl::ParseLayoutIntAttribute(
int64_t* attr_value, absl::string_view attr_description) {
if (!ParseToken(TokKind::kLparen,
StrCat("expects ", attr_description, " to start with ",
TokKindToString(TokKind::kLparen)))) {
return false;
}
if (!ParseInt64(attr_value)) {
return false;
}
if (!ParseToken(TokKind::kRparen,
StrCat("expects ", attr_description, " to end with ",
TokKindToString(TokKind::kRparen)))) {
return false;
}
return true;
}
bool HloParserImpl::ParseSplitConfigs(std::vector<SplitConfig>& split_configs) {
auto parse_and_add_split_index = [&]() {
int64_t i;
if (ParseInt64(&i)) {
split_configs.back().add_split_indices(i);
return true;
}
return false;
};
do {
if (!ParseToken(TokKind::kLparen,
StrCat("expects split configs to start with ",
TokKindToString(TokKind::kLparen)))) {
return false;
}
int64_t dimension;
if (!ParseInt64(&dimension)) {
return false;
}
split_configs.push_back(SplitConfig(dimension, {}));
if (!ParseList(TokKind::kColon, TokKind::kRparen, TokKind::kComma,
parse_and_add_split_index)) {
return false;
}
} while (lexer_.GetKind() == TokKind::kLparen);
return true;
}
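// layout ::= '{' minor_to_major (':' layout_attributes)? '}'
// Optional attributes: dim level types (D), tiles (T), tail padding alignment
// (L), index primitive type (#), pointer primitive type (*), element size in
// bits (E), memory space (S), split configs (SC), physical shape (P), and
// dynamic shape metadata prefix bytes (M).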
bool HloParserImpl::ParseLayout(Layout* layout) {
absl::InlinedVector<int64_t, InlineRank()> minor_to_major;
DimLevelTypeVector dim_level_types;
absl::InlinedVector<bool, InlineRank()> dim_unique;
absl::InlinedVector<bool, InlineRank()> dim_ordered;
std::vector<Tile> tiles;
PrimitiveType index_primitive_type = PRIMITIVE_TYPE_INVALID;
PrimitiveType pointer_primitive_type = PRIMITIVE_TYPE_INVALID;
int64_t element_size_in_bits = 0;
int64_t memory_space = 0;
std::vector<SplitConfig> split_configs;
std::optional<Shape> physical_shape;
int64_t dynamic_shape_metadata_prefix_bytes = 0;
int64_t tail_padding_alignment_in_elements = 1;
auto parse_and_add_item = [&]() {
int64_t i;
if (!ParseInt64(&i)) {
return false;
}
minor_to_major.push_back(i);
return true;
};
if (!ParseToken(TokKind::kLbrace,
StrCat("expects layout to start with ",
TokKindToString(TokKind::kLbrace)))) {
return false;
}
if (lexer_.GetKind() != TokKind::kRbrace) {
if (lexer_.GetKind() == TokKind::kInt) {
do {
if (!parse_and_add_item()) {
return false;
}
} while (EatIfPresent(TokKind::kComma));
}
if (lexer_.GetKind() == TokKind::kColon) {
lexer_.Lex();
if (lexer_.GetKind() == TokKind::kIdent && lexer_.GetStrVal() == "D") {
lexer_.Lex();
ParseDimLevelTypes(&dim_level_types, &dim_unique, &dim_ordered);
}
if (lexer_.GetKind() == TokKind::kIdent && lexer_.GetStrVal() == "T") {
lexer_.Lex();
ParseTiles(&tiles);
}
if (lexer_.GetKind() == TokKind::kIdent && lexer_.GetStrVal() == "L") {
lexer_.Lex();
ParseLayoutIntAttribute(&tail_padding_alignment_in_elements,
"multiple padded to in elements");
}
if (lexer_.GetKind() == TokKind::kOctothorp) {
lexer_.Lex();
ParseToken(
TokKind::kLparen,
StrCat("expects ", TokKindToString(TokKind::kOctothorp),
" to be followed by ", TokKindToString(TokKind::kLparen)));
ParseUnsignedIntegerType(&index_primitive_type);
ParseToken(TokKind::kRparen,
StrCat("expects index primitive type to be followed by ",
TokKindToString(TokKind::kRparen)));
}
if (lexer_.GetKind() == TokKind::kAsterisk) {
lexer_.Lex();
ParseToken(
TokKind::kLparen,
StrCat("expects ", TokKindToString(TokKind::kAsterisk),
" to be followed by ", TokKindToString(TokKind::kLparen)));
ParseUnsignedIntegerType(&pointer_primitive_type);
ParseToken(TokKind::kRparen,
StrCat("expects pointer primitive type to be followed by ",
TokKindToString(TokKind::kRparen)));
}
if (lexer_.GetKind() == TokKind::kIdent && lexer_.GetStrVal() == "E") {
lexer_.Lex();
ParseLayoutIntAttribute(&element_size_in_bits, "element size in bits");
}
if (lexer_.GetKind() == TokKind::kIdent && lexer_.GetStrVal() == "S") {
lexer_.Lex();
ParseLayoutIntAttribute(&memory_space, "memory space");
}
if (lexer_.GetKind() == TokKind::kIdent && lexer_.GetStrVal() == "SC") {
lexer_.Lex();
ParseSplitConfigs(split_configs);
}
if (lexer_.GetKind() == TokKind::kIdent && lexer_.GetStrVal() == "P") {
lexer_.Lex();
physical_shape.emplace();
ParsePhysicalShape(&*physical_shape);
}
if (lexer_.GetKind() == TokKind::kIdent && lexer_.GetStrVal() == "M") {
lexer_.Lex();
ParseLayoutIntAttribute(&dynamic_shape_metadata_prefix_bytes,
"dynamic shape metadata prefix bytes");
}
}
}
if (!ParseToken(TokKind::kRbrace,
StrCat("expects layout to end with ",
TokKindToString(TokKind::kRbrace)))) {
return false;
}
std::vector<Tile> vec_tiles(tiles.size());
for (int i = 0; i < tiles.size(); i++) {
vec_tiles[i] = Tile(tiles[i]);
}
*layout = LayoutUtil::MakeLayout(
minor_to_major, dim_level_types, dim_unique, dim_ordered, vec_tiles,
tail_padding_alignment_in_elements, index_primitive_type,
pointer_primitive_type, element_size_in_bits, memory_space, split_configs,
std::move(physical_shape), dynamic_shape_metadata_prefix_bytes);
return true;
}
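// Parses a shape: either a tuple '(' shape (',' shape)* ')' or a primitive
// type followed by dimension sizes (with optional dynamic markers) and an
// optional layout.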
bool HloParserImpl::ParseShape(Shape* result) {
if (EatIfPresent(TokKind::kLparen)) {
std::vector<Shape> shapes;
    if (lexer_.GetKind() == TokKind::kRparen) {
      // Empty tuple shape.
    } else {
do {
shapes.emplace_back();
if (!ParseShape(&shapes.back())) {
return false;
}
} while (EatIfPresent(TokKind::kComma));
}
*result = ShapeUtil::MakeTupleShape(shapes);
return ParseToken(TokKind::kRparen, "expects ')' at the end of tuple.");
}
PrimitiveType primitive_type;
if (!ParsePrimitiveType(&primitive_type)) {
return false;
}
std::vector<int64_t> dimension_sizes;
std::vector<bool> dynamic_dimensions;
if (!ParseDimensionSizes(&dimension_sizes, &dynamic_dimensions)) {
return false;
}
result->set_element_type(primitive_type);
for (int i = 0; i < dimension_sizes.size(); ++i) {
result->add_dimensions(dimension_sizes[i]);
result->set_dynamic_dimension(i, dynamic_dimensions[i]);
}
if (options_.fill_missing_layouts() || ShapeUtil::IsScalar(*result)) {
LayoutUtil::SetToDefaultLayout(result);
}
if (lexer_.GetKind() == TokKind::kLbrace &&
(lexer_.LookAhead() == TokKind::kInt ||
lexer_.LookAhead() == TokKind::kColon)) {
Layout layout;
if (!ParseLayout(&layout)) {
return false;
}
if (layout.dim_level_types_size() != 0 &&
layout.dim_level_types_size() != result->rank()) {
return Error(
lexer_.GetLoc(),
StrFormat("Dimensions size is %ld, but dim level types size is %ld.",
result->rank(), layout.dim_level_types_size()));
}
if (layout.minor_to_major_size() != result->rank()) {
return Error(
lexer_.GetLoc(),
StrFormat("Dimensions size is %ld, but minor to major size is %ld.",
result->rank(), layout.minor_to_major_size()));
}
if (LayoutUtil::IsSparse(layout) && layout.tiles_size() > 0) {
return Error(lexer_.GetLoc(),
StrFormat("Layout has tiles, but is for a sparse array: %s",
layout.ToString()));
}
if (!LayoutUtil::IsSparse(layout) && layout.has_physical_shape()) {
return Error(
lexer_.GetLoc(),
StrFormat(
"Layout has physical shape, but is not for a sparse array: %s",
layout.ToString()));
}
*result->mutable_layout() = layout;
}
return true;
}
bool HloParserImpl::CanBeShape() {
return lexer_.GetKind() == TokKind::kPrimitiveType ||
lexer_.GetKind() == TokKind::kLparen;
}
bool HloParserImpl::ParseName(std::string* result) {
VLOG(kDebugLevel) << "ParseName";
if (lexer_.GetKind() != TokKind::kIdent &&
lexer_.GetKind() != TokKind::kName) {
return TokenError("expects name");
}
*result = lexer_.GetStrVal();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseAttributeName(std::string* result) {
if (lexer_.GetKind() != TokKind::kAttributeName) {
return TokenError("expects attribute name");
}
*result = lexer_.GetStrVal();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseString(std::string* result) {
VLOG(kDebugLevel) << "ParseString";
if (lexer_.GetKind() != TokKind::kString) {
return TokenError("expects string");
}
*result = lexer_.GetStrVal();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseJsonDict(std::string* result) {
VLOG(kDebugLevel) << "ParseJsonDict";
if (lexer_.LexJsonDict() != TokKind::kString) {
return TokenError("expects JSON dict");
}
*result = lexer_.GetStrVal();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseDxD(const std::string& name,
std::vector<int64_t>* result) {
LocTy loc = lexer_.GetLoc();
if (!result->empty()) {
return Error(loc, StrFormat("sub-attribute '%s=' already exists", name));
}
if (lexer_.GetKind() == TokKind::kInt) {
int64_t number;
if (!ParseInt64(&number)) {
return Error(loc, StrFormat("expects sub-attribute '%s=i'", name));
}
result->push_back(number);
return true;
}
if (lexer_.GetKind() == TokKind::kDxD) {
std::string str = lexer_.GetStrVal();
if (!SplitToInt64s(str, 'x', result)) {
return Error(loc, StrFormat("expects sub-attribute '%s=ixj...'", name));
}
lexer_.Lex();
return true;
}
return TokenError("expects token type kInt or kDxD");
}
bool HloParserImpl::ParseWindowPad(std::vector<std::vector<int64_t>>* pad) {
LocTy loc = lexer_.GetLoc();
if (!pad->empty()) {
return Error(loc, "sub-attribute 'pad=' already exists");
}
if (lexer_.GetKind() != TokKind::kPad) {
return TokenError("expects window pad pattern, e.g., '0_0x3_3'");
}
std::string str = lexer_.GetStrVal();
for (const auto& padding_dim_str : absl::StrSplit(str, 'x')) {
std::vector<int64_t> low_high;
if (!SplitToInt64s(padding_dim_str, '_', &low_high) ||
low_high.size() != 2) {
return Error(loc,
"expects padding_low and padding_high separated by '_'");
}
pad->push_back(low_high);
}
lexer_.Lex();
return true;
}
bool HloParserImpl::ParsePaddingConfig(PaddingConfig* padding) {
if (lexer_.GetKind() != TokKind::kPad) {
return TokenError("expects padding config, e.g., '0_0_0x3_3_1'");
}
LocTy loc = lexer_.GetLoc();
std::string str = lexer_.GetStrVal();
for (const auto& padding_dim_str : absl::StrSplit(str, 'x')) {
std::vector<int64_t> padding_dim;
if (!SplitToInt64s(padding_dim_str, '_', &padding_dim) ||
(padding_dim.size() != 2 && padding_dim.size() != 3)) {
return Error(loc,
"expects padding config pattern like 'low_high_interior' or "
"'low_high'");
}
auto* dim = padding->add_dimensions();
dim->set_edge_padding_low(padding_dim[0]);
dim->set_edge_padding_high(padding_dim[1]);
dim->set_interior_padding(padding_dim.size() == 3 ? padding_dim[2] : 0);
}
lexer_.Lex();
return true;
}
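// Parses the original_value attribute: a nested, parenthesized tree whose
// leaves are '{' "instruction_name" (shape_index)? '}' entries mapped onto the
// leaf shape indices of `shape`.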
bool HloParserImpl::ParseOriginalValue(
optional<std::shared_ptr<OriginalValue>>* original_value,
const Shape& shape) {
VLOG(kDebugLevel) << "ParseOriginalValue";
if (!ParseToken(TokKind::kLbrace, "Expects '{'")) {
return false;
}
*original_value = std::make_shared<OriginalValue>(shape);
ShapeIndex leaf_shape_index;
while (lexer_.GetKind() != TokKind::kRbrace) {
if (lexer_.GetKind() == TokKind::kLparen) {
lexer_.Lex();
leaf_shape_index.push_back(0);
} else if (lexer_.GetKind() == TokKind::kRparen) {
lexer_.Lex();
leaf_shape_index.pop_back();
} else if (lexer_.GetKind() == TokKind::kComma) {
lexer_.Lex();
++leaf_shape_index.back();
} else if (lexer_.GetKind() == TokKind::kLbrace) {
lexer_.Lex();
std::string instruction_name;
ShapeIndex shape_index;
if (!ParseString(&instruction_name)) {
return false;
}
if (lexer_.GetKind() != TokKind::kRbrace) {
if (!ParseShapeIndex(&shape_index)) {
return false;
}
}
*(**original_value)->mutable_element(leaf_shape_index) = {
instruction_name, shape_index};
if (!ParseToken(TokKind::kRbrace,
"Expects '} at end of each OriginalArray'")) {
return false;
}
} else {
return false;
}
}
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseMetadata(OpMetadata& metadata) {
absl::flat_hash_map<std::string, AttrConfig> attrs;
optional<std::string> op_type;
optional<std::string> op_name;
optional<std::string> source_file;
optional<int32_t> source_line;
optional<std::vector<int64_t>> profile_type;
optional<std::string> deduplicated_name;
optional<bool> preserve_layout;
optional<std::string> scheduling_name;
attrs["op_type"] = {false, AttrTy::kString, &op_type};
attrs["op_name"] = {false, AttrTy::kString, &op_name};
attrs["source_file"] = {false, AttrTy::kString, &source_file};
attrs["source_line"] = {false, AttrTy::kInt32, &source_line};
attrs["profile_type"] = {false, AttrTy::kBracedInt64List,
&profile_type};
attrs["deduplicated_name"] = {false, AttrTy::kString,
&deduplicated_name};
attrs["preserve_layout"] = {false, AttrTy::kBool,
&preserve_layout};
attrs["scheduling_name"] = {false, AttrTy::kString,
&scheduling_name};
if (!ParseSubAttributes(attrs)) {
return false;
}
if (op_type) {
metadata.set_op_type(*op_type);
}
if (op_name) {
metadata.set_op_name(*op_name);
}
if (source_file) {
metadata.set_source_file(*source_file);
}
if (source_line) {
metadata.set_source_line(*source_line);
}
if (profile_type) {
for (const auto& type : *profile_type) {
if (!ProfileType_IsValid(type)) {
return false;
}
metadata.add_profile_type(static_cast<ProfileType>(type));
}
}
if (deduplicated_name) {
metadata.set_deduplicated_name(*deduplicated_name);
}
if (preserve_layout) {
metadata.set_preserve_layout(*preserve_layout);
} else {
metadata.set_preserve_layout(false);
}
if (scheduling_name) {
metadata.set_scheduling_name(*scheduling_name);
}
return true;
}
bool HloParserImpl::ParseSingleOrListMetadata(
std::vector<OpMetadata>& metadata) {
if (lexer_.GetKind() == TokKind::kLbrace &&
lexer_.LookAhead() == TokKind::kLbrace) {
if (!ParseToken(TokKind::kLbrace, "expected '{' to start metadata list")) {
return false;
}
if (lexer_.GetKind() != TokKind::kRbrace) {
do {
if (!ParseMetadata(metadata.emplace_back())) {
return false;
}
} while (EatIfPresent(TokKind::kComma));
}
return ParseToken(TokKind::kRbrace, "expected '}' to end metadata list");
}
return ParseMetadata(metadata.emplace_back());
}
bool HloParserImpl::ParseOpShardingType(OpSharding::Type* type) {
switch (lexer_.GetKind()) {
case TokKind::kw_maximal:
*type = OpSharding::MAXIMAL;
lexer_.Lex();
break;
case TokKind::kw_replicated:
*type = OpSharding::REPLICATED;
lexer_.Lex();
break;
case TokKind::kw_manual:
*type = OpSharding::MANUAL;
lexer_.Lex();
break;
default:
return false;
}
return true;
}
bool HloParserImpl::ParseListShardingType(
std::vector<OpSharding::Type>* types) {
if (!ParseToken(TokKind::kLbrace,
"expected '{' to start sharding type list")) {
return false;
}
if (lexer_.GetKind() != TokKind::kRbrace) {
do {
OpSharding::Type type;
if (!ParseOpShardingType(&type)) {
return false;
}
types->emplace_back(type);
} while (EatIfPresent(TokKind::kComma));
}
return ParseToken(TokKind::kRbrace, "expected '}' to end sharding type list");
}
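// Parses an opcode identifier. Identifiers ending in -start/-update/-done are
// parsed as async-start/async-update/async-done wrapping the opcode named by
// the remaining prefix.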
bool HloParserImpl::ParseOpcode(
HloOpcode* opcode, std::optional<HloOpcode>* async_wrapped_opcode) {
VLOG(kDebugLevel) << "ParseOpcode";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects opcode");
}
std::string val = lexer_.GetStrVal();
auto status_or_result = StringToHloOpcode(val);
if (!status_or_result.ok()) {
auto try_parsing_async_op = [&](absl::string_view suffix,
HloOpcode async_opcode) {
absl::string_view wrapped_opcode_view(val);
if (absl::ConsumeSuffix(&wrapped_opcode_view, suffix)) {
*opcode = async_opcode;
std::string wrapped_opcode(wrapped_opcode_view);
status_or_result = StringToHloOpcode(wrapped_opcode);
return true;
}
return false;
};
if (try_parsing_async_op("-start", HloOpcode::kAsyncStart) ||
try_parsing_async_op("-update", HloOpcode::kAsyncUpdate) ||
try_parsing_async_op("-done", HloOpcode::kAsyncDone)) {
if (!status_or_result.ok()) {
return TokenError(
StrFormat("expects async wrapped opcode but sees: %s, error: %s",
val, status_or_result.status().message()));
}
*async_wrapped_opcode = status_or_result.value();
} else {
return TokenError(StrFormat("expects opcode but sees: %s, error: %s", val,
status_or_result.status().message()));
}
} else {
*opcode = status_or_result.value();
}
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseFftType(FftType* result) {
VLOG(kDebugLevel) << "ParseFftType";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects fft type");
}
std::string val = lexer_.GetStrVal();
if (!FftType_Parse(val, result) || !FftType_IsValid(*result)) {
return TokenError(StrFormat("expects fft type but sees: %s", val));
}
lexer_.Lex();
return true;
}
bool HloParserImpl::ParsePaddingType(PaddingType* result) {
VLOG(kDebugLevel) << "ParsePaddingType";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects padding type");
}
std::string val = lexer_.GetStrVal();
if (!PaddingType_Parse(val, result) || !PaddingType_IsValid(*result)) {
return TokenError(StrFormat("expects padding type but sees: %s", val));
}
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseComparisonDirection(ComparisonDirection* result) {
VLOG(kDebugLevel) << "ParseComparisonDirection";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects comparison direction");
}
std::string val = lexer_.GetStrVal();
auto status_or_result = StringToComparisonDirection(val);
if (!status_or_result.ok()) {
return TokenError(
StrFormat("expects comparison direction but sees: %s", val));
}
*result = status_or_result.value();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseComparisonType(Comparison::Type* result) {
VLOG(kDebugLevel) << "ParseComparisonType";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects comparison type");
}
std::string val = lexer_.GetStrVal();
auto status_or_result = StringToComparisonType(val);
if (!status_or_result.ok()) {
return TokenError(StrFormat("expects comparison type but sees: %s", val));
}
*result = status_or_result.value();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseFusionKind(HloInstruction::FusionKind* result) {
VLOG(kDebugLevel) << "ParseFusionKind";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects fusion kind");
}
std::string val = lexer_.GetStrVal();
auto status_or_result = StringToFusionKind(val);
if (!status_or_result.ok()) {
return TokenError(StrFormat("expects fusion kind but sees: %s, error: %s",
val, status_or_result.status().message()));
}
*result = status_or_result.value();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseRandomDistribution(RandomDistribution* result) {
VLOG(kDebugLevel) << "ParseRandomDistribution";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects random distribution");
}
std::string val = lexer_.GetStrVal();
auto status_or_result = StringToRandomDistribution(val);
if (!status_or_result.ok()) {
return TokenError(
StrFormat("expects random distribution but sees: %s, error: %s", val,
status_or_result.status().message()));
}
*result = status_or_result.value();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseRandomAlgorithm(RandomAlgorithm* result) {
VLOG(kDebugLevel) << "ParseRandomAlgorithm";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects random algorithm");
}
std::string val = lexer_.GetStrVal();
auto status_or_result = StringToRandomAlgorithm(val);
if (!status_or_result.ok()) {
return TokenError(
StrFormat("expects random algorithm but sees: %s, error: %s", val,
status_or_result.status().message()));
}
*result = status_or_result.value();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParsePrecision(PrecisionConfig::Precision* result) {
VLOG(kDebugLevel) << "ParsePrecision";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects random distribution");
}
std::string val = lexer_.GetStrVal();
auto status_or_result = StringToPrecision(val);
if (!status_or_result.ok()) {
return TokenError(StrFormat("expects precision but sees: %s, error: %s",
val, status_or_result.status().message()));
}
*result = status_or_result.value();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseAlgorithm(PrecisionConfig::Algorithm* result) {
VLOG(kDebugLevel) << "ParseAlgorithm";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects algorithm");
}
std::string val = lexer_.GetStrVal();
auto status_or_result = StringToAlgorithm(val);
if (!status_or_result.ok()) {
return TokenError(StrFormat("expects algorithm but sees: %s, error: %s",
val, status_or_result.status().message()));
}
*result = status_or_result.value();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseInt64(int64_t* result) {
VLOG(kDebugLevel) << "ParseInt64";
if (lexer_.GetKind() != TokKind::kInt) {
return TokenError("expects integer");
}
*result = lexer_.GetInt64Val();
lexer_.Lex();
return true;
}
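// Accepts decimal, integer, inf, and -inf tokens; decimal values that
// overflow double are rejected.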
bool HloParserImpl::ParseDouble(double* result) {
switch (lexer_.GetKind()) {
case TokKind::kDecimal: {
double val = lexer_.GetDecimalVal();
if (std::isinf(val)) {
return TokenError(StrCat("Constant is out of range for double (+/-",
std::numeric_limits<double>::max(),
") and so is unparsable."));
}
*result = val;
break;
}
case TokKind::kInt:
*result = static_cast<double>(lexer_.GetInt64Val());
break;
case TokKind::kw_inf:
*result = std::numeric_limits<double>::infinity();
break;
case TokKind::kNegInf:
*result = -std::numeric_limits<double>::infinity();
break;
default:
return TokenError("expects decimal or integer");
}
lexer_.Lex();
return true;
}
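// Parses a complex literal of the form (real, imag).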
bool HloParserImpl::ParseComplex(std::complex<double>* result) {
if (lexer_.GetKind() != TokKind::kLparen) {
return TokenError("expects '(' before complex number");
}
lexer_.Lex();
double real;
LocTy loc = lexer_.GetLoc();
if (!ParseDouble(&real)) {
return Error(loc,
"expect floating-point value for real part of complex number");
}
if (lexer_.GetKind() != TokKind::kComma) {
return TokenError(
absl::StrFormat("expect comma after real part of complex literal"));
}
lexer_.Lex();
double imag;
loc = lexer_.GetLoc();
if (!ParseDouble(&imag)) {
return Error(
loc,
"expect floating-point value for imaginary part of complex number");
}
if (lexer_.GetKind() != TokKind::kRparen) {
return TokenError(absl::StrFormat("expect ')' after complex number"));
}
*result = std::complex<double>(real, imag);
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseBool(bool* result) {
if (lexer_.GetKind() != TokKind::kw_true &&
lexer_.GetKind() != TokKind::kw_false) {
return TokenError("expects true or false");
}
*result = lexer_.GetKind() == TokKind::kw_true;
lexer_.Lex();
return true;
}
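// Consumes a token of the expected kind, or reports `msg` as an error.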
bool HloParserImpl::ParseToken(TokKind kind, const std::string& msg) {
VLOG(kDebugLevel) << "ParseToken " << TokKindToString(kind) << " " << msg;
if (lexer_.GetKind() != kind) {
return TokenError(msg);
}
lexer_.Lex();
return true;
}
bool HloParserImpl::EatIfPresent(TokKind kind) {
if (lexer_.GetKind() != kind) {
return false;
}
lexer_.Lex();
return true;
}
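// Registers a named instruction in the innermost name table. Redefining a
// name reports an error at both the new and the previous definition.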
bool HloParserImpl::AddInstruction(const std::string& name,
HloInstruction* instruction,
LocTy name_loc) {
auto result = current_name_table().insert({name, {instruction, name_loc}});
if (!result.second) {
Error(name_loc, StrCat("instruction already exists: ", name));
return Error(result.first->second.second,
"instruction previously defined here");
}
return true;
}
bool HloParserImpl::AddComputation(const std::string& name,
HloComputation* computation,
LocTy name_loc) {
auto result = computation_pool_.insert({name, {computation, name_loc}});
if (!result.second) {
Error(name_loc, StrCat("computation already exists: ", name));
return Error(result.first->second.second,
"computation previously defined here");
}
return true;
}
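// The Parse*Only entry points below each parse a single construct from the
// input and require that nothing follows it.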
absl::StatusOr<Shape> HloParserImpl::ParseShapeOnly() {
lexer_.Lex();
Shape shape;
if (!ParseShape(&shape)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument("Syntax error:\nExtra content after shape");
}
return shape;
}
absl::StatusOr<Layout> HloParserImpl::ParseLayoutOnly() {
lexer_.Lex();
Layout layout;
if (!ParseLayout(&layout)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument("Syntax error:\nExtra content after layout");
}
return layout;
}
absl::StatusOr<HloSharding> HloParserImpl::ParseShardingOnly() {
lexer_.Lex();
std::optional<HloSharding> sharding;
if (!ParseSharding(sharding)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument("Syntax error:\nExtra content after sharding");
}
return std::move(*sharding);
}
absl::StatusOr<FrontendAttributes>
HloParserImpl::ParseFrontendAttributesOnly() {
lexer_.Lex();
FrontendAttributes attributes;
if (!ParseFrontendAttributes(&attributes)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument(
"Syntax error:\nExtra content after frontend attributes");
}
return attributes;
}
absl::StatusOr<StatisticsViz> HloParserImpl::ParseStatisticsVizOnly() {
lexer_.Lex();
StatisticsViz statistics_viz;
if (!ParseStatisticsViz(&statistics_viz)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument("Syntax error:\nExtra content after statistics");
}
return statistics_viz;
}
absl::StatusOr<std::vector<bool>>
HloParserImpl::ParseParameterReplicationOnly() {
lexer_.Lex();
ParameterReplication parameter_replication;
if (!ParseParameterReplication(&parameter_replication)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument(
"Syntax error:\nExtra content after parameter replication");
}
return std::vector<bool>(
parameter_replication.replicated_at_leaf_buffers().begin(),
parameter_replication.replicated_at_leaf_buffers().end());
}
absl::StatusOr<HloParserImpl::BoolList>
HloParserImpl::ParseBooleanListOrSingleBooleanOnly() {
lexer_.Lex();
BoolList booleans;
if (!ParseBooleanListOrSingleBoolean(&booleans)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument("Syntax error:\nExtra content after boolean list");
}
return booleans;
}
absl::StatusOr<std::vector<ReplicaGroup>>
HloParserImpl::ParseReplicaGroupsOnly() {
lexer_.Lex();
std::vector<ReplicaGroup> replica_groups;
if (!ParseReplicaGroupsOnly(&replica_groups)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument("Syntax error:\nExtra content after replica groups");
}
return replica_groups;
}
absl::StatusOr<Window> HloParserImpl::ParseWindowOnly() {
lexer_.Lex();
Window window;
if (!ParseWindow(&window, false)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument("Syntax error:\nExtra content after window");
}
return window;
}
absl::StatusOr<ConvolutionDimensionNumbers>
HloParserImpl::ParseConvolutionDimensionNumbersOnly() {
lexer_.Lex();
ConvolutionDimensionNumbers dnums;
if (!ParseConvolutionDimensionNumbers(&dnums)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument(
"Syntax error:\nExtra content after convolution dnums");
}
return dnums;
}
absl::StatusOr<PaddingConfig> HloParserImpl::ParsePaddingConfigOnly() {
lexer_.Lex();
PaddingConfig padding_config;
if (!ParsePaddingConfig(&padding_config)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument("Syntax error:\nExtra content after PaddingConfig");
}
return padding_config;
}
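// Parses a free-standing instruction (or a bare shape) into a new entry
// computation; operands that name undefined instructions are materialized as
// parameters on demand.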
bool HloParserImpl::ParseSingleInstruction(HloModule* module) {
if (create_missing_instruction_ != nullptr || !scoped_name_tables_.empty()) {
LOG(FATAL) << "Parser state is not clean. Please do not call any other "
"methods before calling ParseSingleInstruction.";
}
HloComputation::Builder builder(module->name());
int64_t parameter_count = 0;
create_missing_instruction_ =
[this, &builder, &parameter_count](
const std::string& name,
const Shape& shape) -> std::pair<HloInstruction*, LocTy>* {
std::string new_name = name.empty() ? StrCat("_", parameter_count) : name;
HloInstruction* parameter = builder.AddInstruction(
HloInstruction::CreateParameter(parameter_count++, shape, new_name));
current_name_table()[new_name] = {parameter, lexer_.GetLoc()};
return tsl::gtl::FindOrNull(current_name_table(), new_name);
};
Scope scope(&scoped_name_tables_);
if (CanBeShape()) {
if (!ParseInstructionRhs(&builder, module->name(), lexer_.GetLoc())) {
return false;
}
} else {
std::string root_name;
if (!ParseInstruction(&builder, &root_name)) {
return false;
}
}
if (lexer_.GetKind() != TokKind::kEof) {
Error(
lexer_.GetLoc(),
"Syntax error:\nExpected eof after parsing single instruction. Did you"
" mean to write an HLO module and forget the \"HloModule\" header?");
return false;
}
module->AddEntryComputation(builder.Build());
for (auto& comp : computations_) {
module->AddEmbeddedComputation(std::move(comp));
}
TF_CHECK_OK(module->set_schedule(ScheduleFromInstructionOrder(module)));
return true;
}
}
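// The free functions below are thin wrappers that construct an HloParserImpl
// over the input string and delegate to it.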
absl::StatusOr<std::unique_ptr<HloModule>> ParseAndReturnUnverifiedModule(
absl::string_view str, const HloModuleConfig& config,
const HloParserOptions& options) {
auto module = std::make_unique<HloModule>("_", config);
HloParserImpl parser(str, options);
TF_RETURN_IF_ERROR(parser.Run(module.get()));
return std::move(module);
}
absl::StatusOr<HloSharding> ParseSharding(absl::string_view str) {
HloParserImpl parser(str);
return parser.ParseShardingOnly();
}
absl::StatusOr<FrontendAttributes> ParseFrontendAttributes(
absl::string_view str) {
HloParserImpl parser(str);
return parser.ParseFrontendAttributesOnly();
}
absl::StatusOr<StatisticsViz> ParseStatisticsViz(absl::string_view str) {
HloParserImpl parser(str);
return parser.ParseStatisticsVizOnly();
}
absl::StatusOr<std::vector<bool>> ParseParameterReplication(
absl::string_view str) {
HloParserImpl parser(str);
return parser.ParseParameterReplicationOnly();
}
absl::StatusOr<HloParserImpl::BoolList> ParseBooleanListOrSingleBoolean(
absl::string_view str) {
HloParserImpl parser(str);
return parser.ParseBooleanListOrSingleBooleanOnly();
}
absl::StatusOr<std::vector<ReplicaGroup>> ParseReplicaGroupsOnly(
absl::string_view str) {
HloParserImpl parser(str);
return parser.ParseReplicaGroupsOnly();
}
absl::StatusOr<Window> ParseWindow(absl::string_view str) {
HloParserImpl parser(str);
return parser.ParseWindowOnly();
}
absl::StatusOr<ConvolutionDimensionNumbers> ParseConvolutionDimensionNumbers(
absl::string_view str) {
HloParserImpl parser(str);
return parser.ParseConvolutionDimensionNumbersOnly();
}
absl::StatusOr<PaddingConfig> ParsePaddingConfig(absl::string_view str) {
HloParserImpl parser(str);
return parser.ParsePaddingConfigOnly();
}
absl::StatusOr<Shape> ParseShape(absl::string_view str) {
HloParserImpl parser(str);
return parser.ParseShapeOnly();
}
absl::StatusOr<Layout> ParseLayout(absl::string_view str) {
HloParserImpl parser(str);
return parser.ParseLayoutOnly();
}
std::unique_ptr<HloParser> HloParser::CreateHloParserForTests(
absl::string_view str) {
return std::make_unique<HloParserImpl>(str);
}
}  // namespace xla
#include "xla/hlo/parser/hlo_parser.h"
#include <cstdint>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/parser/hlo_lexer.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
namespace m = ::xla::match;
using ::absl::string_view;
using ::testing::ElementsAre;
using ::testing::HasSubstr;
struct TestData {
std::string test_name;
std::string module_string;
int64_t replica_count = 1;
bool enable_verification = true;
};
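// Builds a readable gtest parameter name from a TestData entry.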
std::string TestDataToString(const ::testing::TestParamInfo<TestData>& data) {
return data.param.test_name;
}
struct NonRoundtripTestData {
std::string test_name;
std::string input_module_string;
std::string output_module_string;
};
std::string NonRoundtripTestDataToString(
const ::testing::TestParamInfo<NonRoundtripTestData>& data) {
return data.param.test_name;
}
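// HLO module strings exercised by the parameterized parser tests; each entry
// is keyed by its test_name.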
std::vector<TestData> CreateTestCases() {
return std::vector<TestData>({
{
"AxpyParam",
R"(HloModule axpy_module, entry_computation_layout={(f32[], f32[2,4]{1,0}, f32[2,4]{1,0})->f32[2,4]{1,0}}
ENTRY %axpy.v5 (alpha: f32[], x: f32[2,4], y: f32[2,4]) -> f32[2,4] {
%alpha = f32[] parameter(0)
%broadcast = f32[2,4]{1,0} broadcast(f32[] %alpha), dimensions={}
%x = f32[2,4]{1,0} parameter(1)
%multiply = f32[2,4]{1,0} multiply(f32[2,4]{1,0} %broadcast, f32[2,4]{1,0} %x)
%y = f32[2,4]{1,0} parameter(2)
ROOT %add = f32[2,4]{1,0} add(f32[2,4]{1,0} %multiply, f32[2,4]{1,0} %y)
}
)"
},
{
"ParamReplication",
R"(HloModule param_replication_module, entry_computation_layout={(f32[], (f32[2,4]{1,0}, (f32[2,4]{1,0})))->(f32[], (f32[2,4]{1,0}, (f32[2,4]{1,0})))}
ENTRY %param_replication (a: f32[], b: (f32[2,4], (f32[2,4]))) -> (f32[], (f32[2,4], (f32[2,4]))) {
%a = f32[] parameter(0), parameter_replication={true}
%b = (f32[2,4]{1,0}, (f32[2,4]{1,0})) parameter(1), parameter_replication={false,true}
ROOT %tuple = (f32[], (f32[2,4]{1,0}, (f32[2,4]{1,0}))) tuple(f32[] %a, (f32[2,4]{1,0}, (f32[2,4]{1,0})) %b)
}
)"
},
{
"ConstantPred",
R"(HloModule constant_pred_module, entry_computation_layout={()->pred[]}
ENTRY %constant_pred () -> pred[] {
ROOT %constant = pred[] constant(true), metadata={op_type="const" op_name="\"it\'s not a problem\n" source_file="path/to/test.cc" source_line=68}, backend_config="foo\" bar"
}
)"
},
{
"ConstantPredArray",
R"(HloModule module, entry_computation_layout={()->pred[2,3]{1,0}}
ENTRY %constant_pred_array () -> pred[2,3] {
ROOT %constant = pred[2,3]{1,0} constant({ { 0, 1, 0 }, { 1, 0, 1 } })
}
)"
},
{
"ConstantS32",
R"(HloModule constant_s32_module, entry_computation_layout={()->s32[]}
ENTRY %constant_s32 () -> s32[] {
ROOT %constant = s32[] constant(-42)
}
)"
},
{
"ConstantS32WithStatistics",
R"(HloModule constant_s32_module, entry_computation_layout={()->s32[]}
ENTRY %constant_s32 () -> s32[] {
ROOT %constant = s32[] constant(-42), statistics={visualizing_index=1,stat-1=33,stat-2=44}
}
)"
},
{
"ConstantF32",
R"(HloModule ConstantF32_module, entry_computation_layout={()->f32[]}
ENTRY %ConstantF32.v4 () -> f32[] {
ROOT %constant = f32[] constant(42), backend_config="this is a configuration"
}
)"
},
{
"ConstantF32R1Empty",
R"(HloModule ConstantF32Empty_module, entry_computation_layout={()->f32[0]{0}}
ENTRY %ConstantF32Empty.v4 () -> f32[0] {
ROOT %constant = f32[0]{0} constant({})
}
)"
},
{
"ConstantF32R4Empty",
R"(HloModule ConstantF32R4Empty_module, entry_computation_layout={()->f32[2,0,4,3]{3,2,1,0}}
ENTRY %ConstantF32R4Empty.v4 () -> f32[2,0,4,3] {
ROOT %constant = f32[2,0,4,3]{3,2,1,0} constant({ { }, { } })
}
)"
},
{
"Constant4D",
R"(HloModule Small_3x2x1x1_module, entry_computation_layout={()->f32[3,2,1,1]{3,2,1,0}}
ENTRY %Small_3x2x1x1.v1 () -> f32[3,2,1,1] {
ROOT %constant = f32[3,2,1,1]{3,2,1,0} constant({ { { {-1} }, { {4.1} } }, { { {2} }, { {4.1} } }, { { {5} }, { {4.4} } } })
}
)"
},
{
"ConstantNonFinite",
R"(HloModule IsFiniteR1F32s_module, entry_computation_layout={()->pred[6]{0}}
ENTRY %IsFiniteR1F32s.v2 () -> pred[6] {
%constant = f32[6]{0} constant({nan, 7, nan, -1, inf, -inf})
ROOT %is-finite = pred[6]{0} is-finite(f32[6]{0} %constant)
}
)"
},
{
"ConstantNonFiniteE4M3",
R"(HloModule ConstantR1F8E4M3FNs_module, entry_computation_layout={()->f8e4m3fn[3]{0}}
ENTRY %IsFiniteR1F32s.v2 () -> f8e4m3fn[3] {
ROOT %constant = f8e4m3fn[3]{0} constant({nan, 7, -nan})
}
)"
},
{
"ConstantNonFiniteE4M3B11",
R"(HloModule ConstantR1F8E4M3B11_module, entry_computation_layout={()->f8e4m3b11fnuz[2]{0}}
ENTRY %IsFiniteR1F32s.v2 () -> f8e4m3b11fnuz[2] {
ROOT %constant = f8e4m3b11fnuz[2]{0} constant({-nan, 7})
}
)"
},
{
"ConstantF16",
R"(HloModule ConstantF16_module, entry_computation_layout={()->f16[]}
ENTRY %ConstantF16.v4 () -> f16[] {
ROOT %constant = f16[] constant(500)
}
)"
},
{
"BF16",
R"(HloModule BF16, entry_computation_layout={()->bf16[]}
ENTRY %BF16.v4 () -> bf16[] {
ROOT %constant = bf16[] constant(500)
}
)"
},
{
"AddConstants",
R"(HloModule add_constants_module, entry_computation_layout={()->f32[]}
ENTRY %add_constants () -> f32[] {
%constant = f32[] constant(3.14)
ROOT %add = f32[] add(f32[] %constant, f32[] %constant)
}
)"
},
{
"TupleConstant",
R"(HloModule TupleConstant_module, entry_computation_layout={()->(f32[2,1]{1,0}, f32[2]{0})}
ENTRY %TupleConstant.v1 () -> (f32[2,1], f32[2]) {
ROOT %constant = (f32[2,1]{1,0}, f32[2]{0}) constant(( { {1}, {2} }, {2, 42} ))
}
)"
},
{
"SelectR1F32",
R"(HloModule SelectR1F32WithCmpR1F32sFromParamsSmall_module, entry_computation_layout={(f32[4]{0}, f32[4]{0})->f32[4]{0}}
ENTRY %SelectR1F32WithCmpR1F32sFromParamsSmall.v4 (v1: f32[4], v2: f32[4]) -> f32[4] {
%v1 = f32[4]{0} parameter(0), sharding={maximal device=1}
%v2 = f32[4]{0} parameter(1), sharding={maximal device=1}
%greater-than = pred[4]{0} compare(f32[4]{0} %v1, f32[4]{0} %v2), direction=GT, type=TOTALORDER, sharding={replicated}
ROOT %select = f32[4]{0} select(pred[4]{0} %greater-than, f32[4]{0} %v1, f32[4]{0} %v2), sharding={replicated}
}
)"
},
{
"EmptyTupleCreate",
R"(HloModule EmptyTupleCreate_module, entry_computation_layout={()->()}
ENTRY %EmptyTupleCreate.v1 () -> () {
ROOT %tuple = () tuple()
}
)"
},
{
"TupleCreate",
R"(HloModule TupleCreate_module, entry_computation_layout={(f32[], f32[3]{0}, f32[2,3]{1,0})->(f32[], f32[3]{0}, f32[2,3]{1,0})}
ENTRY %TupleCreate.v4 (v1: f32[], v2: f32[3], v3: f32[2,3]) -> (f32[], f32[3], f32[2,3]) {
%v1 = f32[] parameter(0)
%v2 = f32[3]{0} parameter(1)
%v3 = f32[2,3]{1,0} parameter(2)
ROOT %tuple = (f32[], f32[3]{0}, f32[2,3]{1,0}) tuple(f32[] %v1, f32[3]{0} %v2, f32[2,3]{1,0} %v3)
}
)"
},
{
"LargeTupleRoundTrip",
R"(HloModule LargeTupleRoundTrip_module, entry_computation_layout={(f32[])->(f32[], f32[], f32[], f32[], f32[], f32[])}
ENTRY %TupleCreate.v4 (v: f32[]) -> (f32[], f32[], f32[], f32[], f32[], f32[]) {
%v = f32[] parameter(0)
ROOT %tuple = (f32[], f32[], f32[], f32[], f32[], f32[]) tuple(f32[] %v, f32[] %v, f32[] %v, f32[] %v, f32[] %v, f32[] %v)
}
)"
},
{
"ShardedTupleCreate",
R"(HloModule ShardedTupleCreate_module, entry_computation_layout={(f32[], f32[3]{0}, f32[2,3]{1,0})->(f32[], f32[3]{0}, f32[2,3]{1,0})}
ENTRY %ShardedTupleCreate.v4 (v1: f32[], v2: f32[3], v3: f32[2,3]) -> (f32[], f32[3], f32[2,3]) {
%v1 = f32[] parameter(0), sharding={manual}
%v2 = f32[3]{0} parameter(1)
%v3 = f32[2,3]{1,0} parameter(2)
ROOT %tuple = (f32[], f32[3]{0}, f32[2,3]{1,0}) tuple(f32[] %v1, f32[3]{0} %v2, f32[2,3]{1,0} %v3), sharding={{manual}, {maximal device=0}, {replicated}}
}
)"
},
{
"DomainParsing",
R"(HloModule DomainParsing_module, entry_computation_layout={(f32[])->f32[]}
ENTRY %DomainParsing (v1: f32[]) -> f32[] {
%v1 = f32[] parameter(0)
ROOT %dom = f32[] domain(f32[] %v1), domain={kind="sharding", entry={maximal device=0}, exit={maximal device=1}}
}
)"
},
{
"WhileWithScalarS32Result",
R"(HloModule WhileWithScalarS32Result_module, entry_computation_layout={()->s32[]}
%body.v3 (prev.1: s32[]) -> s32[] {
%constant = s32[] constant(1)
%prev.1 = s32[] parameter(0)
ROOT %add = s32[] add(s32[] %constant, s32[] %prev.1)
}
%condition.v3 (prev.2: s32[]) -> pred[] {
%constant.1 = s32[] constant(5)
%prev.2 = s32[] parameter(0)
ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %prev.2), direction=GT
}
ENTRY %WhileWithScalarS32Result.v2 () -> s32[] {
%constant.2 = s32[] constant(0)
ROOT %while = s32[] while(s32[] %constant.2), condition=%condition.v3, body=%body.v3
}
)"
},
{
"CopyStartAndCopyDone",
R"(HloModule CopyStartAndCopyDone_module, entry_computation_layout={(f32[], f32[2,3]{1,0:S(1)})->(f32[], f32[2,3]{1,0:S(2)})}
ENTRY %CopyStartAndCopyDone (v1: f32[], v2: f32[2,3]) -> (f32[], f32[2,3]) {
%v1 = f32[] parameter(0)
%copy-start.1 = (f32[], f32[], u32[]) copy-start(f32[] %v1), cross_program_prefetch_index=0
%copy-done.1 = f32[] copy-done((f32[], f32[], u32[]) %copy-start.1)
%v2 = f32[2,3]{1,0:S(1)} parameter(1)
%copy-start.2 = (f32[2,3]{1,0:S(2)}, f32[2,3]{1,0:S(1)}, u32[]) copy-start(f32[2,3]{1,0:S(1)} %v2)
%copy-done.2 = f32[2,3]{1,0:S(2)} copy-done((f32[2,3]{1,0:S(2)}, f32[2,3]{1,0:S(1)}, u32[]) %copy-start.2)
ROOT %tuple = (f32[], f32[2,3]{1,0:S(2)}) tuple(f32[] %copy-done.1, f32[2,3]{1,0:S(2)} %copy-done.2)
}
)"
},
{
"SendRecv",
R"(HloModule TwoSendRecvBothWayRecvFist_module, entry_computation_layout={()->(f32[], token[])}
ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> (f32[], token[]) {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15, sharding={{maximal device=1}, {replicated}, {replicated}}
ROOT %recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15, sharding={{maximal device=1}, {replicated}}
%constant = f32[] constant(2.1), sharding={maximal device=0}
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0), channel_id=16, sharding={{maximal device=1}, {replicated}, {replicated}}, control-predecessors={%recv}
%send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16, sharding={maximal device=0}
}
)"
},
{
"SendRecvWithHostTransfer",
R"(HloModule HostTransferSendRecv_module, entry_computation_layout={()->(f32[], token[])}
ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> (f32[], token[]) {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15, is_host_transfer=true
ROOT %recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15, is_host_transfer=true
%constant = f32[] constant(2.1), sharding={maximal device=0}
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0), channel_id=16, is_host_transfer=true
%send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16, is_host_transfer=true
}
)"
},
{
"GetTupleElement",
R"(HloModule GetTupleElement_module, entry_computation_layout={()->s32[2,3]{1,0}}
ENTRY %GetTupleElement.v4 () -> s32[2,3] {
%constant = f32[3]{0} constant({1, 2, 3})
%constant.1 = s32[2,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 } })
%tuple = (f32[3]{0}, s32[2,3]{1,0}) tuple(f32[3]{0} %constant, s32[2,3]{1,0} %constant.1)
ROOT %get-tuple-element = s32[2,3]{1,0} get-tuple-element((f32[3]{0}, s32[2,3]{1,0}) %tuple), index=1, sharding={maximal device=0}
}
)"
},
{
"Call",
R"(HloModule CallR0F32IdentityScalar_module, entry_computation_layout={()->f32[]}
%Identity.v1 (x: f32[]) -> f32[] {
ROOT %x = f32[] parameter(0)
}
ENTRY %CallR0F32IdentityScalar.v2 () -> f32[] {
%constant = f32[] constant(42)
ROOT %call = f32[] call(f32[] %constant), to_apply=%Identity.v1
}
)"
},
{
"CompositeCall",
R"(HloModule CompositeCall, entry_computation_layout={()->f32[]}
%add (x: f32[]) -> f32[] {
%x = f32[] parameter(0)
%constant = f32[] constant(2)
ROOT %z = f32[] add(f32[] %x, f32[] %constant)
}
ENTRY %CompositeCall.v2 () -> f32[] {
%constant.1 = f32[] constant(42)
ROOT %call = f32[] call(f32[] %constant.1), to_apply=%add, is_composite=true, frontend_attributes={composite.attributes={n = 1 : i32, tensor = dense<1> : tensor<i32>},composite.name="foo.bar",composite.version="1"}
}
)"
},
{
"CompositeCallWithExtraFrontendAttributes",
R"(HloModule CompositeCall, entry_computation_layout={()->f32[]}
%add (x: f32[]) -> f32[] {
%x = f32[] parameter(0)
%constant = f32[] constant(2)
ROOT %z = f32[] add(f32[] %x, f32[] %constant)
}
ENTRY %CompositeCall.v2 () -> f32[] {
%constant.1 = f32[] constant(42)
ROOT %call = f32[] call(f32[] %constant.1), to_apply=%add, is_composite=true, frontend_attributes={composite.attributes={n = 1 : i32, tensor = dense<1> : tensor<i32>},composite.name="foo.bar",composite.version="1",foo="bar"}
}
)"
},
{
"CompositeCallOptionalAttributesAndVersion",
R"(HloModule CompositeCall, entry_computation_layout={()->f32[]}
%add (x: f32[]) -> f32[] {
%x = f32[] parameter(0)
%constant = f32[] constant(2)
ROOT %z = f32[] add(f32[] %x, f32[] %constant)
}
ENTRY %CompositeCall.v2 () -> f32[] {
%constant.1 = f32[] constant(42)
ROOT %call = f32[] call(f32[] %constant.1), to_apply=%add, is_composite=true, frontend_attributes={composite.name="foo.bar"}
}
)"
},
{
"CompositeCallOptionalAttributes",
R"(HloModule CompositeCall, entry_computation_layout={()->f32[]}
%add (x: f32[]) -> f32[] {
%x = f32[] parameter(0)
%constant = f32[] constant(2)
ROOT %z = f32[] add(f32[] %x, f32[] %constant)
}
ENTRY %CompositeCall.v2 () -> f32[] {
%constant.1 = f32[] constant(42)
ROOT %call = f32[] call(f32[] %constant.1), to_apply=%add, is_composite=true, frontend_attributes={composite.name="foo.bar",composite.version="1"}
}
)"
},
{
"CompositeCallOptionalVersion",
R"(HloModule CompositeCall, entry_computation_layout={()->f32[]}
%add (x: f32[]) -> f32[] {
%x = f32[] parameter(0)
%constant = f32[] constant(2)
ROOT %z = f32[] add(f32[] %x, f32[] %constant)
}
ENTRY %CompositeCall.v2 () -> f32[] {
%constant.1 = f32[] constant(42)
ROOT %call = f32[] call(f32[] %constant.1), to_apply=%add, is_composite=true, frontend_attributes={composite.attributes={n = 1 : i32, tensor = dense<1> : tensor<i32>},composite.name="foo.bar"}
}
)"
},
{
"CustomCallWithOpaque",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar", backend_config="this string is opaque"
}
)"
},
{
"CustomCallWithBackendConfigInCurlyBraces",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar", backend_config={key: "value"}
}
)"
},
{
"CustomCallWithLiteral",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar", literal=s32[2]{0} {1, 2}
}
)"
},
{
"CustomCallWithLiteralTuple",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar", literal=( s32[4]{0} {4, 128, 128, 3}, pred[4]{0} {1, 0, 0, 0} )
}
)"
},
{
"CustomCallWithLiteralR0",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar", literal=f32[] 0.1
}
)"
},
{
"ReduceWindow",
R"(HloModule R4UnitWindow_module, entry_computation_layout={(f32[13,12,8,15]{0,3,2,1})->f32[13,3,8,15]{0,3,2,1}}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %R4UnitWindow.v3 (operand: f32[13,12,8,15]) -> f32[13,3,8,15] {
%operand = f32[13,12,8,15]{0,3,2,1} parameter(0)
%constant = f32[] constant(0)
ROOT %reduce-window = f32[13,3,8,15]{0,3,2,1} reduce-window(f32[13,12,8,15]{0,3,2,1} %operand, f32[] %constant), window={size=1x1x7x1 stride=1x4x1x1 pad=0_0x0_0x3_3x0_0}, to_apply=%add_F32.v3
}
)"
},
{
"ReduceWindowScalar",
R"(HloModule reduce_window_scalar, entry_computation_layout={()->f32[]}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %R4UnitWindowScalar () -> f32[] {
%constant = f32[] constant(42)
%constant.1 = f32[] constant(1)
ROOT %reduce-window = f32[] reduce-window(f32[] %constant, f32[] %constant.1), to_apply=%add_F32.v3
}
)"
},
{
"ReduceWindowVariadic",
R"(HloModule reduce_window_variadic, entry_computation_layout={()->(f32[], f32[])}
%add_F32.v3 (lhs1: f32[], lhs2: f32[], rhs1: f32[], rhs2: f32[]) -> (f32[], f32[]) {
%lhs1 = f32[] parameter(0)
%rhs1 = f32[] parameter(2)
%add1 = f32[] add(f32[] %lhs1, f32[] %rhs1)
%lhs2 = f32[] parameter(1)
%rhs2 = f32[] parameter(3)
%add2 = f32[] add(f32[] %lhs2, f32[] %rhs2)
ROOT %tuple1 = (f32[], f32[]) tuple(f32[] %add1, f32[] %add2)
}
ENTRY %R4UnitWindowScalar () -> (f32[], f32[]) {
%constant = f32[] constant(42)
%constant.1 = f32[] constant(1)
ROOT %reduce-window = (f32[], f32[]) reduce-window(f32[] %constant, f32[] %constant, f32[] %constant.1, f32[] %constant.1), to_apply=%add_F32.v3
}
)"
},
{
"Convolution",
R"(HloModule Convolve1D1Window_0_module, entry_computation_layout={(f32[1,2,1]{2,1,0}, f32[1,1,1]{2,1,0})->f32[1,2,1]{2,0,1}}
ENTRY %Convolve1D1Window_0.v3 (input: f32[1,2,1], filter: f32[1,1,1]) -> f32[1,2,1] {
%input = f32[1,2,1]{2,1,0} parameter(0)
%copy = f32[1,2,1]{2,0,1} copy(f32[1,2,1]{2,1,0} %input)
%filter = f32[1,1,1]{2,1,0} parameter(1)
ROOT %convolution = f32[1,2,1]{2,0,1} convolution(f32[1,2,1]{2,0,1} %copy, f32[1,1,1]{2,1,0} %filter), window={size=1}, dim_labels=b0f_0io->b0f, operand_precision={high,default}
}
)"
},
{
"ConvolutionDynamic",
R"(HloModule Convolve1D1Window_0_module, entry_computation_layout={(f32[1,2,1]{2,1,0}, f32[1,1,1]{2,1,0})->f32[1,2,1]{2,0,1}}
ENTRY %Convolve1D1Window_0.v3 (input: f32[1,2,1], filter: f32[1,1,1]) -> f32[1,2,1] {
%input = f32[1,2,1]{2,1,0} parameter(0)
%copy = f32[1,2,1]{2,0,1} copy(f32[1,2,1]{2,1,0} %input)
%filter = f32[1,1,1]{2,1,0} parameter(1)
ROOT %custom-call.52 = f32[1,2,1]{2,0,1} custom-call(f32[1,2,1]{2,0,1} %copy, f32[1,1,1]{2,1,0} %filter), window={size=1}, dim_labels=b0f_0io->b0f, operand_precision={high,default}, custom_call_target="DynamicConvolutionForward", metadata={op_type="Conv2D" op_name="conv1d"}
}
)"
},
{
"ConvolutionR2",
R"(HloModule ConvolveR2_module, entry_computation_layout={(f32[1,2]{1,0}, f32[2,2]{1,0})->f32[1,2]{0,1}}
ENTRY %ConvolveR2.v3 (input: f32[1,2], filter: f32[2,2]) -> f32[1,2] {
%input = f32[1,2]{1,0} parameter(0)
%filter = f32[2,2]{1,0} parameter(1)
ROOT %convolution = f32[1,2]{0,1} convolution(f32[1,2]{1,0} %input, f32[2,2]{1,0} %filter), dim_labels=bf_io->bf
}
)"
},
{
"ConvolutionBackward",
R"(HloModule ConvolveBackward_module, entry_computation_layout={(f32[128,7,7,512]{0,3,2,1}, f32[3,3,512,512]{3,2,1,0})->f32[128,14,14,512]{0,3,2,1}}
ENTRY %ConvolveBackward (input: f32[128,7,7,512], filter: f32[3,3,512,512]) -> f32[128,14,14,512] {
%input = f32[128,7,7,512]{0,3,2,1} parameter(0)
%filter = f32[3,3,512,512]{3,2,1,0} parameter(1)
ROOT %convolution-base-dilated = f32[128,14,14,512]{0,3,2,1} convolution(f32[128,7,7,512]{0,3,2,1} %input, f32[3,3,512,512]{3,2,1,0} %filter), window={size=3x3 pad=1_2x1_2 lhs_dilate=2x2 rhs_reversal=1x1}, dim_labels=b01f_01oi->b01f
}
)"
},
{
"Reverse4D",
R"(HloModule Reverse4DFloatArrayOnDim01_module, entry_computation_layout={()->f32[4,3,2,1]{0,1,2,3}}
ENTRY %Reverse4DFloatArrayOnDim01.v2 () -> f32[4,3,2,1] {
%constant = f32[4,3,2,1]{0,1,2,3} constant({ { { {1}, {2} }, { {3}, {4} }, { {5}, {6} } }, { { {7}, {8} }, { {9}, {10} }, { {11}, {12} } }, { { {13}, {14} }, { {15}, {16} }, { {17}, {18} } }, { { {19}, {20} }, { {21}, {22} }, { {23}, {24} } } })
ROOT %reverse = f32[4,3,2,1]{0,1,2,3} reverse(f32[4,3,2,1]{0,1,2,3} %constant), dimensions={0,1}
}
)"
},
{
"Concat",
R"(HloModule Concat2x3With2x5_module, entry_computation_layout={()->f32[2,8]{1,0}}
ENTRY %Concat2x3With2x5.v3 () -> f32[2,8] {
%constant = f32[2,3]{1,0} constant({ { 0, 1, 2 }, { 1000, 1001, 1002 } })
%constant.1 = f32[2,5]{1,0} constant({ { 64, 65, 66, 67, 68 }, { 1064, 1065, 1066, 1067, 1068 } })
ROOT %concatenate = f32[2,8]{1,0} concatenate(f32[2,3]{1,0} %constant, f32[2,5]{1,0} %constant.1), dimensions={1}
}
)"
},
{
"SelectAndScatter",
R"(HloModule R4F32OverlapSmall_module, entry_computation_layout={()->f32[4,5,1,1]{3,2,1,0}}
%ge_F32.v3 (lhs: f32[], rhs: f32[]) -> pred[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %greater-than-or-equal-to = pred[] compare(f32[] %lhs, f32[] %rhs), direction=GE, type=TOTALORDER
}
%add_F32.v3 (lhs.1: f32[], rhs.1: f32[]) -> f32[] {
%lhs.1 = f32[] parameter(0)
%rhs.1 = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs.1, f32[] %rhs.1)
}
ENTRY %R4F32OverlapSmall.v4 () -> f32[4,5,1,1] {
%constant = f32[4,5,1,1]{3,2,1,0} constant({ { { {7} }, { {2} }, { {5} }, { {3} }, { {8} } }, { { {3} }, { {8} }, { {9} }, { {3} }, { {4} } }, { { {1} }, { {5} }, { {7} }, { {5} }, { {6} } }, { { {0} }, { {6} }, { {2} }, { {10} }, { {2} } } })
%constant.1 = f32[2,2,1,1]{3,2,1,0} constant({ { { {2} }, { {6} } }, { { {3} }, { {1} } } })
%constant.2 = f32[] constant(0)
ROOT %select-and-scatter = f32[4,5,1,1]{3,2,1,0} select-and-scatter(f32[4,5,1,1]{3,2,1,0} %constant, f32[2,2,1,1]{3,2,1,0} %constant.1, f32[] %constant.2), window={size=2x3x1x1 stride=2x2x1x1}, select=%ge_F32.v3, scatter=%add_F32.v3
}
)"
},
{
"SelectAndScatterScalar",
R"(HloModule select_and_scatter_scalar, entry_computation_layout={()->f32[]}
%ge_F32.v3 (lhs: f32[], rhs: f32[]) -> pred[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %greater-than-or-equal-to = pred[] compare(f32[] %lhs, f32[] %rhs), direction=GE
}
%add_F32.v3 (lhs.1: f32[], rhs.1: f32[]) -> f32[] {
%lhs.1 = f32[] parameter(0)
%rhs.1 = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs.1, f32[] %rhs.1)
}
ENTRY %SelectAndScatterScalar () -> f32[] {
%constant = f32[] constant(42)
%constant.1 = f32[] constant(1)
%constant.2 = f32[] constant(2)
ROOT %select-and-scatter = f32[] select-and-scatter(f32[] %constant, f32[] %constant.1, f32[] %constant.2), select=%ge_F32.v3, scatter=%add_F32.v3
}
)"
},
{
"Slice",
R"(HloModule slice_module, entry_computation_layout={(f32[3,3,4,4]{3,2,1,0})->f32[3,3,2,4]{3,2,1,0}}
ENTRY %slice.v2 (p0: f32[3,3,4,4]) -> f32[3,3,2,4] {
%p0 = f32[3,3,4,4]{3,2,1,0} parameter(0)
ROOT %slice = f32[3,3,2,4]{3,2,1,0} slice(f32[3,3,4,4]{3,2,1,0} %p0), slice={[0:3:1], [0:3:1], [0:4:2], [0:4:1]}
}
)"
},
{
"SliceNoStride",
R"(HloModule Slice3x3x3_To_1x3x3_F32_module, entry_computation_layout={()->f32[1,3,3]{2,1,0}}
ENTRY %Slice3x3x3_To_1x3x3_F32.v2 () -> f32[1,3,3] {
%constant = f32[3,3,3]{2,1,0} constant({ { { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 } }, { { 9, 10, 11 }, { 12, 13, 14 }, { 15, 16, 17 } }, { { 18, 19, 20 }, { 21, 22, 23 }, { 24, 25, 26 } } })
ROOT %slice = f32[1,3,3]{2,1,0} slice(f32[3,3,3]{2,1,0} %constant), slice={[0:1], [0:3], [0:3]}
}
)"
},
{
"SliceR0",
R"(HloModule SliceR0_module, entry_computation_layout={()->s32[]}
ENTRY %SliceR0.v2 () -> s32[] {
%constant = s32[] constant(1)
ROOT %slice = s32[] slice(s32[] %constant), slice={}
}
)"
},
{
"Transpose",
R"(HloModule Transpose_module, entry_computation_layout={()->s32[1,2,3]{2,1,0}}
ENTRY %Transpose.v2 () -> s32[1,2,3] {
%constant = s32[1,2,3]{2,1,0} constant({ { { 1, 2, 3 }, { 4, 5, 6 } } })
ROOT %transpose = s32[1,2,3]{2,1,0} transpose(s32[1,2,3]{2,1,0} %constant), dimensions={0,1,2}
}
)"
},
{
"TransposeC128",
R"(HloModule TransposeC128_module, entry_computation_layout={(c128[1,2,3]{2,1,0})->c128[1,2,3]{2,1,0}}
ENTRY %Transpose.v3 (input: c128[1,2,3]) -> c128[1,2,3] {
%input = c128[1,2,3]{2,1,0} parameter(0)
ROOT %transpose = c128[1,2,3]{2,1,0} transpose(c128[1,2,3]{2,1,0} %input), dimensions={0,1,2}
}
)"
},
{
"TriangularSolve",
R"(HloModule TriangularSolve_module, entry_computation_layout={(f32[4,4]{1,0}, f32[3,4]{1,0})->f32[3,4]{1,0}}
ENTRY %SimpleRightLowerNotranspose.4 (a.1: f32[4,4], b.2: f32[3,4]) -> f32[3,4] {
%a.1 = f32[4,4]{1,0} parameter(0)
%b.2 = f32[3,4]{1,0} parameter(1)
ROOT %triangular-solve.3 = f32[3,4]{1,0} triangular-solve(f32[4,4]{1,0} %a.1, f32[3,4]{1,0} %b.2), lower=true, transpose_a=NO_TRANSPOSE
}
)"
},
{
"DynamicSlice",
R"(HloModule DynamicSlice_module, entry_computation_layout={(s32[2,2,258]{2,1,0}, s32[1]{0})->s32[2,2,258]{2,1,0}}
ENTRY %DynamicSlice.v5 (original_parameter: s32[2,2,258], start_index: s32[1]) -> s32[2,2,258] {
%original_parameter = s32[2,2,258]{2,1,0} parameter(0)
%constant = s32[1]{0} constant({0})
%start_index = s32[1]{0} parameter(1)
%concatenate = s32[3]{0} concatenate(s32[1]{0} %constant, s32[1]{0} %constant, s32[1]{0} %start_index), dimensions={0}
ROOT %dynamic-slice = s32[2,2,258]{2,1,0} dynamic-slice(s32[2,2,258]{2,1,0} %original_parameter, s32[3]{0} %concatenate), dynamic_slice_sizes={2,2,258}
}
)"
},
{
"DynamicSliceScalarIndices",
R"(HloModule DynamicSlice_module, entry_computation_layout={(s32[2,2,258]{2,1,0}, s32[])->s32[2,2,258]{2,1,0}}
ENTRY %DynamicSlice.v5 (original_parameter: s32[2,2,258], start_index: s32[]) -> s32[2,2,258] {
%original_parameter = s32[2,2,258]{2,1,0} parameter(0)
%constant = s32[] constant(0)
%start_index = s32[] parameter(1)
ROOT %dynamic-slice = s32[2,2,258]{2,1,0} dynamic-slice(s32[2,2,258]{2,1,0} %original_parameter, s32[] %constant, s32[] %constant, s32[] %start_index), dynamic_slice_sizes={2,2,258}
}
)"
},
{
"DynamicUpdateSlice",
R"(HloModule DynamicSlice_module, entry_computation_layout={(s32[1,1,25,1]{3,2,1,0}, s32[1,1,2,1]{3,2,1,0}, s32[4]{0})->s32[1,1,25,1]{3,2,1,0}}
ENTRY %DynamicUpdateSlice.v4 (input: s32[1,1,25,1], update: s32[1,1,2,1], start_indices: s32[4]) -> s32[1,1,25,1] {
%input = s32[1,1,25,1]{3,2,1,0} parameter(0)
%update = s32[1,1,2,1]{3,2,1,0} parameter(1)
%start_indices = s32[4]{0} parameter(2)
ROOT %dynamic-update-slice = s32[1,1,25,1]{3,2,1,0} dynamic-update-slice(s32[1,1,25,1]{3,2,1,0} %input, s32[1,1,2,1]{3,2,1,0} %update, s32[4]{0} %start_indices)
}
)"
},
{
"DynamicUpdateSliceScalarIndex",
R"(HloModule DynamicUpdateSlice_module, entry_computation_layout={(s32[1,1,25,1]{3,2,1,0}, s32[1,1,2,1]{3,2,1,0}, s32[], s32[], s32[], s32[])->s32[1,1,25,1]{3,2,1,0}}
ENTRY %DynamicUpdateSlice.v4 (input: s32[1,1,25,1], update: s32[1,1,2,1], start_index.0: s32[], start_index.1: s32[], start_index.2: s32[], start_index.3: s32[]) -> s32[1,1,25,1] {
%input = s32[1,1,25,1]{3,2,1,0} parameter(0)
%update = s32[1,1,2,1]{3,2,1,0} parameter(1)
%start_index.0 = s32[] parameter(2)
%start_index.1 = s32[] parameter(3)
%start_index.2 = s32[] parameter(4)
%start_index.3 = s32[] parameter(5)
ROOT %dynamic-update-slice = s32[1,1,25,1]{3,2,1,0} dynamic-update-slice(s32[1,1,25,1]{3,2,1,0} %input, s32[1,1,2,1]{3,2,1,0} %update, s32[] %start_index.0, s32[] %start_index.1, s32[] %start_index.2, s32[] %start_index.3)
}
)"
},
{
"BatchNormTraining",
R"(HloModule BasicTraining_module, entry_computation_layout={()->(f32[2,2,1,2]{3,2,1,0}, f32[2]{0}, f32[2]{0})}
ENTRY %BasicTraining.v4 () -> (f32[2,2,1,2], f32[2], f32[2]) {
%constant = f32[2,2,1,2]{3,2,1,0} constant({ { { { 1, 2 } }, { { 3, 4 } } }, { { { 5, 6 } }, { { 7, 8 } } } })
%constant.1 = f32[2]{0} constant({2, 3})
%constant.2 = f32[2]{0} constant({1, 2})
ROOT %batch-norm-training = (f32[2,2,1,2]{3,2,1,0}, f32[2]{0}, f32[2]{0}) batch-norm-training(f32[2,2,1,2]{3,2,1,0} %constant, f32[2]{0} %constant.1, f32[2]{0} %constant.2), epsilon=0.001, feature_index=3
}
)"
},
{
"BatchNormInference",
R"(HloModule BatchNormInference_module, entry_computation_layout={(f32[2,2,2,2]{3,2,1,0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2]{0})->f32[2,2,2,2]{3,2,1,0}}
ENTRY %BatchNormInference.v6 (input: f32[2,2,2,2], offset: f32[2], scale: f32[2], mean: f32[2], variance: f32[2]) -> f32[2,2,2,2] {
%input = f32[2,2,2,2]{3,2,1,0} parameter(0)
%offset = f32[2]{0} parameter(1)
%scale = f32[2]{0} parameter(2)
%mean = f32[2]{0} parameter(3)
%variance = f32[2]{0} parameter(4)
ROOT %batch-norm-inference = f32[2,2,2,2]{3,2,1,0} batch-norm-inference(f32[2,2,2,2]{3,2,1,0} %input, f32[2]{0} %offset, f32[2]{0} %scale, f32[2]{0} %mean, f32[2]{0} %variance), epsilon=0.001, feature_index=0
}
)"
},
{
"BatchNormGrad",
R"(HloModule BatchNormGrad_module, entry_computation_layout={(f32[2,2,2,2]{3,2,1,0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2,2,2,2]{3,2,1,0})->(f32[2,2,2,2]{3,2,1,0}, f32[2]{0}, f32[2]{0})}
ENTRY %BatchNormGrad.v4 (input: f32[2,2,2,2], scale: f32[2], mean: f32[2], variance: f32[2], grad_output: f32[2,2,2,2]) -> (f32[2,2,2,2], f32[2], f32[2]) {
%input = f32[2,2,2,2]{3,2,1,0} parameter(0)
%scale = f32[2]{0} parameter(1)
%mean = f32[2]{0} parameter(2)
%variance = f32[2]{0} parameter(3)
%grad_output = f32[2,2,2,2]{3,2,1,0} parameter(4)
ROOT %batch-norm-grad = (f32[2,2,2,2]{3,2,1,0}, f32[2]{0}, f32[2]{0}) batch-norm-grad(f32[2,2,2,2]{3,2,1,0} %input, f32[2]{0} %scale, f32[2]{0} %mean, f32[2]{0} %variance, f32[2,2,2,2]{3,2,1,0} %grad_output), epsilon=0.001, feature_index=0
}
)"
},
{
"Fft",
R"(HloModule Fft_module, entry_computation_layout={(c64[8,32]{1,0})->c64[8,32]{1,0}}
ENTRY %Fft (input: c64[8,32]) -> c64[8,32] {
%input = c64[8,32]{1,0} parameter(0)
ROOT %fft = c64[8,32]{1,0} fft(c64[8,32]{1,0} %input), fft_type=FFT, fft_length={32}
}
)"
},
{
"Ifft2d",
R"(HloModule Ifft2d_module, entry_computation_layout={(c64[5,8,32]{2,1,0})->c64[5,8,32]{2,1,0}}
ENTRY %Ifft2d (input: c64[5,8,32]) -> c64[5,8,32] {
%input = c64[5,8,32]{2,1,0} parameter(0)
ROOT %fft = c64[5,8,32]{2,1,0} fft(c64[5,8,32]{2,1,0} %input), fft_type=IFFT, fft_length={8,32}
}
)"
},
{
"Rfft2d",
R"(HloModule Rfft2d_module, entry_computation_layout={(f32[5,64,32]{2,1,0})->c64[5,64,17]{2,1,0}}
ENTRY %Rfft2d (input: f32[5,64,32]) -> c64[5,64,17] {
%input = f32[5,64,32]{2,1,0} parameter(0)
ROOT %fft = c64[5,64,17]{2,1,0} fft(f32[5,64,32]{2,1,0} %input), fft_type=RFFT, fft_length={64,32}
}
)"
},
{
"Irfft3d",
R"(HloModule Irfft3d_module, entry_computation_layout={(c64[5,64,128,33]{3,2,1,0})->f32[5,64,128,64]{3,2,1,0}}
ENTRY %Irfft3d (input: c64[5,64,128,33]) -> f32[5,64,128,64] {
%input = c64[5,64,128,33]{3,2,1,0} parameter(0)
ROOT %fft = f32[5,64,128,64]{3,2,1,0} fft(c64[5,64,128,33]{3,2,1,0} %input), fft_type=IRFFT, fft_length={64,128,64}
}
)"
},
{
"Pad",
R"(HloModule Pad1DS3Array_module, entry_computation_layout={()->f32[7]{0}}
ENTRY %Pad1DS3Array.v3 () -> f32[7] {
%constant = f32[3]{0} constant({1, 2, 3})
%constant.1 = f32[] constant(0.1)
ROOT %pad = f32[7]{0} pad(f32[3]{0} %constant, f32[] %constant.1), padding=3_1
}
)"
},
{
"PadHasInterior",
R"(HloModule PadHasInterior_module, entry_computation_layout={(f32[1,25,7,7]{3,2,1,0})->f32[1,25,17,11]{3,2,1,0}}
ENTRY %PadHasInterior.v3 (input: f32[1,25,7,7]) -> f32[1,25,17,11] {
%input = f32[1,25,7,7]{3,2,1,0} parameter(0)
%constant = f32[] constant(-5.123)
ROOT %pad = f32[1,25,17,11]{3,2,1,0} pad(f32[1,25,7,7]{3,2,1,0} %input, f32[] %constant), padding=0_0_0x0_0_0x2_2_1x2_2_0
}
)"
},
{
"RoundNearestEven",
R"(HloModule RoundNearestEven_module, entry_computation_layout={(f32[2,2]{1,0})->f32[2,2]{1,0}}
ENTRY %RoundNearestEven (input: f32[2,2]) -> f32[2,2] {
%input = f32[2,2]{1,0} parameter(0)
ROOT %round-nearest-even = f32[2,2]{1,0} round-nearest-even(f32[2,2]{1,0} %input)
}
)"
},
{
"PadHasNegativePadding",
R"(HloModule PadHasNegativePadding_module, entry_computation_layout={(f32[1,25,7,7,10]{4,3,2,1,0})->f32[1,15,6,3,35]{4,3,2,1,0}}
ENTRY %PadHasNegativePadding (input: f32[1,25,7,7,10]) -> f32[1,15,6,3,35] {
%input = f32[1,25,7,7,10]{4,3,2,1,0} parameter(0)
%constant = f32[] constant(-5.123)
ROOT %pad = f32[1,15,6,3,35]{4,3,2,1,0} pad(f32[1,25,7,7,10]{4,3,2,1,0} %input, f32[] %constant), padding=0_0_0x0_-10_0x0_-1_0x-2_-2_0x-1_-1_3
}
)"
},
{
"Fusion",
R"(HloModule fusion_module, entry_computation_layout={()->f32[3,2,1,1]{3,2,1,0}}
%fused_computation (constant.param_0: f32[3,2,1,1], constant.1.param_1: f32[2]) -> f32[3,2,1,1] {
%constant.param_0 = f32[3,2,1,1]{3,2,1,0} parameter(0)
%constant.1.param_1 = f32[2]{0} parameter(1)
%broadcast = f32[3,2,1,1]{3,2,1,0} broadcast(f32[2]{0} %constant.1.param_1), dimensions={1}
ROOT %subtract = f32[3,2,1,1]{3,2,1,0} subtract(f32[3,2,1,1]{3,2,1,0} %constant.param_0, f32[3,2,1,1]{3,2,1,0} %broadcast)
}
ENTRY %fusion.v3 () -> f32[3,2,1,1] {
%constant = f32[3,2,1,1]{3,2,1,0} constant({ { { {-1} }, { {4.1} } }, { { {2} }, { {4.1} } }, { { {5} }, { {4.4} } } })
%constant.1 = f32[2]{0} constant({3.14, 4.25})
ROOT %fusion = f32[3,2,1,1]{3,2,1,0} fusion(f32[3,2,1,1]{3,2,1,0} %constant, f32[2]{0} %constant.1), kind=kLoop, calls=%fused_computation
}
)"
},
{
"FusionWithAliasing",
R"(HloModule FusionWithAliasing, entry_computation_layout={((f32[2,2]{0,1}, f32[42,2,3]{0,1,2}), f32[123,4]{0,1})->(f32[123,4]{0,1}, f32[2,2]{0,1}, f32[1,2,3]{0,1,2})}
%FusedComp (p0: (f32[2,2], f32[42,2,3]), p1: f32[123,4]) -> (f32[123,4], f32[2,2], f32[1,2,3]) {
%p1 = f32[123,4]{0,1} parameter(1)
%p0 = (f32[2,2]{0,1}, f32[42,2,3]{0,1,2}) parameter(0)
%elem1 = f32[2,2]{0,1} get-tuple-element((f32[2,2]{0,1}, f32[42,2,3]{0,1,2}) %p0), index=0
%constant0 = f32[] constant(1)
%broadcast0 = f32[1,2,3]{0,1,2} broadcast(f32[] %constant0), dimensions={}
ROOT %tuple = (f32[123,4]{0,1}, f32[2,2]{0,1}, f32[1,2,3]{0,1,2}) tuple(f32[123,4]{0,1} %p1, f32[2,2]{0,1} %elem1, f32[1,2,3]{0,1,2} %broadcast0)
}
ENTRY %FusionWithAliasing (p0.1: (f32[2,2], f32[42,2,3]), p1.1: f32[123,4]) -> (f32[123,4], f32[2,2], f32[1,2,3]) {
%p0.1 = (f32[2,2]{0,1}, f32[42,2,3]{0,1,2}) parameter(0)
%p1.1 = f32[123,4]{0,1} parameter(1)
ROOT %fusion = (f32[123,4]{0,1}, f32[2,2]{0,1}, f32[1,2,3]{0,1,2}) fusion((f32[2,2]{0,1}, f32[42,2,3]{0,1,2}) %p0.1, f32[123,4]{0,1} %p1.1), kind=kLoop, output_to_operand_aliasing={{0}: (1, {}), {1}: (0, {0})}, calls=%FusedComp
}
)"
},
{
"Gather",
R"(HloModule StringifyGather, entry_computation_layout={(f32[50,49,48,47,46]{4,3,2,1,0}, s64[10,9,8,7,5]{4,3,2,1,0})->f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0}}
ENTRY %Gather (input_tensor: f32[50,49,48,47,46], start_indices: s64[10,9,8,7,5]) -> f32[10,9,8,7,30,29,28,27,26] {
%input_tensor = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0)
%start_indices = s64[10,9,8,7,5]{4,3,2,1,0} parameter(1)
ROOT %gather = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} gather(f32[50,49,48,47,46]{4,3,2,1,0} %input_tensor, s64[10,9,8,7,5]{4,3,2,1,0} %start_indices), offset_dims={4,5,6,7,8}, collapsed_slice_dims={}, start_index_map={0,1,2,3,4}, index_vector_dim=4, slice_sizes={30,29,28,27,26}
}
)"
},
{
"SortedGather",
R"(HloModule StringifyGather, entry_computation_layout={(f32[50,49,48,47,46]{4,3,2,1,0}, s64[10,9,8,7,5]{4,3,2,1,0})->f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0}}
ENTRY %Gather (input_tensor: f32[50,49,48,47,46], start_indices: s64[10,9,8,7,5]) -> f32[10,9,8,7,30,29,28,27,26] {
%input_tensor = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0)
%start_indices = s64[10,9,8,7,5]{4,3,2,1,0} parameter(1)
ROOT %gather = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} gather(f32[50,49,48,47,46]{4,3,2,1,0} %input_tensor, s64[10,9,8,7,5]{4,3,2,1,0} %start_indices), offset_dims={4,5,6,7,8}, collapsed_slice_dims={}, start_index_map={0,1,2,3,4}, index_vector_dim=4, slice_sizes={30,29,28,27,26}, indices_are_sorted=true
}
)"
},
{
"BatchGather",
R"(HloModule StringifyGather, entry_computation_layout={(f32[50,49,48,47,46,512]{5,4,3,2,1,0}, s64[10,9,8,7,5,512]{5,4,3,2,1,0})->f32[10,9,8,7,30,29,28,27,26,512]{9,8,7,6,5,4,3,2,1,0}}
ENTRY %Gather (input_tensor: f32[50,49,48,47,46,512], start_indices: s64[10,9,8,7,5,512]) -> f32[10,9,8,7,30,29,28,27,26,512] {
%input_tensor = f32[50,49,48,47,46,512]{5,4,3,2,1,0} parameter(0)
%start_indices = s64[10,9,8,7,5,512]{5,4,3,2,1,0} parameter(1)
ROOT %gather = f32[10,9,8,7,30,29,28,27,26,512]{9,8,7,6,5,4,3,2,1,0} gather(f32[50,49,48,47,46,512]{5,4,3,2,1,0} %input_tensor, s64[10,9,8,7,5,512]{5,4,3,2,1,0} %start_indices), offset_dims={4,5,6,7,8}, collapsed_slice_dims={}, start_index_map={0,1,2,3,4}, operand_batching_dims={5}, start_indices_batching_dims={5}, index_vector_dim=4, slice_sizes={30,29,28,27,26,1}
}
)"
},
{
"Scatter",
R"(HloModule StringifyScatter, entry_computation_layout={(f32[50,49,48,47,46]{4,3,2,1,0}, s64[10,9,8,7,5]{4,3,2,1,0}, f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0})->f32[50,49,48,47,46]{4,3,2,1,0}}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %Scatter (input_tensor: f32[50,49,48,47,46], scatter_indices: s64[10,9,8,7,5], updates: f32[10,9,8,7,30,29,28,27,26]) -> f32[50,49,48,47,46] {
%input_tensor = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0)
%scatter_indices = s64[10,9,8,7,5]{4,3,2,1,0} parameter(1)
%updates = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} parameter(2)
ROOT %scatter = f32[50,49,48,47,46]{4,3,2,1,0} scatter(f32[50,49,48,47,46]{4,3,2,1,0} %input_tensor, s64[10,9,8,7,5]{4,3,2,1,0} %scatter_indices, f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} %updates), update_window_dims={4,5,6,7,8}, inserted_window_dims={}, scatter_dims_to_operand_dims={0,1,2,3,4}, index_vector_dim=4, to_apply=%add_F32.v3
}
)"
},
{
"BatchScatter",
R"(HloModule StringifyScatter, entry_computation_layout={(f32[50,49,48,47,46,512]{5,4,3,2,1,0}, s64[10,9,8,7,5,512]{5,4,3,2,1,0}, f32[10,9,8,7,30,29,28,27,26,512]{9,8,7,6,5,4,3,2,1,0})->f32[50,49,48,47,46,512]{5,4,3,2,1,0}}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %Scatter (input_tensor: f32[50,49,48,47,46,512], scatter_indices: s64[10,9,8,7,5,512], updates: f32[10,9,8,7,30,29,28,27,26,512]) -> f32[50,49,48,47,46,512] {
%input_tensor = f32[50,49,48,47,46,512]{5,4,3,2,1,0} parameter(0)
%scatter_indices = s64[10,9,8,7,5,512]{5,4,3,2,1,0} parameter(1)
%updates = f32[10,9,8,7,30,29,28,27,26,512]{9,8,7,6,5,4,3,2,1,0} parameter(2)
ROOT %scatter = f32[50,49,48,47,46,512]{5,4,3,2,1,0} scatter(f32[50,49,48,47,46,512]{5,4,3,2,1,0} %input_tensor, s64[10,9,8,7,5,512]{5,4,3,2,1,0} %scatter_indices, f32[10,9,8,7,30,29,28,27,26,512]{9,8,7,6,5,4,3,2,1,0} %updates), update_window_dims={4,5,6,7,8}, inserted_window_dims={}, scatter_dims_to_operand_dims={0,1,2,3,4}, input_batching_dims={5}, scatter_indices_batching_dims={5}, index_vector_dim=4, to_apply=%add_F32.v3
}
)"
},
{
"TupleScatter",
R"(HloModule TupleScatter, entry_computation_layout={(f32[50,49,48,47,46]{4,3,2,1,0}, bf16[50,49,48,47,46]{4,3,2,1,0}, s64[10,9,8,7,5]{4,3,2,1,0}, f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0}, bf16[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0})->(f32[50,49,48,47,46]{4,3,2,1,0}, bf16[50,49,48,47,46]{4,3,2,1,0})}
%add_F32_mul_BF16 (lhs_0: f32[], lhs_1: bf16[], rhs_0: f32[], rhs_1: bf16[]) -> (f32[], bf16[]) {
%lhs_0 = f32[] parameter(0)
%rhs_0 = f32[] parameter(2)
%add = f32[] add(f32[] %lhs_0, f32[] %rhs_0)
%lhs_1 = bf16[] parameter(1)
%rhs_1 = bf16[] parameter(3)
%mul = bf16[] multiply(bf16[] %lhs_1, bf16[] %rhs_1)
ROOT %tuple = (f32[], bf16[]) tuple(f32[] %add, bf16[] %mul)
}
ENTRY %Scatter (input_0: f32[50,49,48,47,46], input_1: bf16[50,49,48,47,46], scatter_indices: s64[10,9,8,7,5], updates_0: f32[10,9,8,7,30,29,28,27,26], updates_1: bf16[10,9,8,7,30,29,28,27,26]) -> (f32[50,49,48,47,46], bf16[50,49,48,47,46]) {
%input_0 = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0)
%input_1 = bf16[50,49,48,47,46]{4,3,2,1,0} parameter(1)
%scatter_indices = s64[10,9,8,7,5]{4,3,2,1,0} parameter(2)
%updates_0 = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} parameter(3)
%updates_1 = bf16[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} parameter(4)
ROOT %scatter = (f32[50,49,48,47,46]{4,3,2,1,0}, bf16[50,49,48,47,46]{4,3,2,1,0}) scatter(f32[50,49,48,47,46]{4,3,2,1,0} %input_0, bf16[50,49,48,47,46]{4,3,2,1,0} %input_1, s64[10,9,8,7,5]{4,3,2,1,0} %scatter_indices, f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} %updates_0, bf16[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} %updates_1), update_window_dims={4,5,6,7,8}, inserted_window_dims={}, scatter_dims_to_operand_dims={0,1,2,3,4}, index_vector_dim=4, to_apply=%add_F32_mul_BF16
}
)"
},
{
"SortedScatter",
R"(HloModule StringifySortedScatter, entry_computation_layout={(f32[50,49,48,47,46]{4,3,2,1,0}, s64[10,9,8,7,5]{4,3,2,1,0}, f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0})->f32[50,49,48,47,46]{4,3,2,1,0}}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %Scatter (input_tensor: f32[50,49,48,47,46], scatter_indices: s64[10,9,8,7,5], updates: f32[10,9,8,7,30,29,28,27,26]) -> f32[50,49,48,47,46] {
%input_tensor = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0)
%scatter_indices = s64[10,9,8,7,5]{4,3,2,1,0} parameter(1)
%updates = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} parameter(2)
ROOT %scatter = f32[50,49,48,47,46]{4,3,2,1,0} scatter(f32[50,49,48,47,46]{4,3,2,1,0} %input_tensor, s64[10,9,8,7,5]{4,3,2,1,0} %scatter_indices, f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} %updates), update_window_dims={4,5,6,7,8}, inserted_window_dims={}, scatter_dims_to_operand_dims={0,1,2,3,4}, index_vector_dim=4, indices_are_sorted=true, to_apply=%add_F32.v3
}
)"
},
{
"UniqueIndicesScatter",
R"(HloModule StringifyUniqueIndicesScatter, entry_computation_layout={(f32[50,49,48,47,46]{4,3,2,1,0}, s64[10,9,8,7,5]{4,3,2,1,0}, f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0})->f32[50,49,48,47,46]{4,3,2,1,0}}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %Scatter (input_tensor: f32[50,49,48,47,46], scatter_indices: s64[10,9,8,7,5], updates: f32[10,9,8,7,30,29,28,27,26]) -> f32[50,49,48,47,46] {
%input_tensor = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0)
%scatter_indices = s64[10,9,8,7,5]{4,3,2,1,0} parameter(1)
%updates = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} parameter(2)
ROOT %scatter = f32[50,49,48,47,46]{4,3,2,1,0} scatter(f32[50,49,48,47,46]{4,3,2,1,0} %input_tensor, s64[10,9,8,7,5]{4,3,2,1,0} %scatter_indices, f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} %updates), update_window_dims={4,5,6,7,8}, inserted_window_dims={}, scatter_dims_to_operand_dims={0,1,2,3,4}, index_vector_dim=4, unique_indices=true, to_apply=%add_F32.v3
}
)"
},
{
"ConstantUnsignedNoUnderflow",
R"(HloModule ConstantUnsignedNoUnderflow_module, entry_computation_layout={()->u64[]}
ENTRY %ConstantUnsignedNoUnderflow () -> u64[] {
ROOT %constant = u64[] constant(1)
}
)"
},
{
"ConstantUnsignedNoOverflow",
R"(HloModule ConstantUnsignedNoOverflow_module, entry_computation_layout={()->u64[]}
ENTRY %ConstantUnsignedNoOverflow () -> u64[] {
ROOT %constant = u64[] constant(9223372036854775807)
}
)"
},
{
"CustomCallWithLayoutConstraints",
R"(HloModule CustomCallWithLayoutConstraints, entry_computation_layout={(f32[42,2,3]{0,1,2}, f32[123,4]{0,1})->f32[1,2,3]{0,2,1}}
ENTRY %CustomCallWithLayoutConstraints (p0: f32[42,2,3], p1: f32[123,4]) -> f32[1,2,3] {
%p0 = f32[42,2,3]{0,1,2} parameter(0)
%p1 = f32[123,4]{0,1} parameter(1)
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[42,2,3]{0,1,2} %p0, f32[123,4]{0,1} %p1), custom_call_target="baz", operand_layout_constraints={f32[42,2,3]{0,1,2}, f32[123,4]{1,0}}
}
)"
},
{
"CustomCallWithLayoutConstraintsNoOperands",
R"(HloModule CustomCallWithLayoutConstraintsNoOperands, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCallWithLayoutConstraints () -> f32[1,2,3] {
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(), custom_call_target="baz", operand_layout_constraints={}
}
)"
},
{
"CustomCallWithLayoutConstraintsTupleShapes",
R"(HloModule CustomCallWithLayoutConstraintsTupleShapes, entry_computation_layout={((f32[2,2]{0,1}, f32[42,2,3]{0,1,2}), f32[123,4]{0,1})->(f32[1,2,3]{0,2,1}, f32[1,2,3]{1,2,0})}
ENTRY %CustomCallWithLayoutConstraints (p0: (f32[2,2], f32[42,2,3]), p1: f32[123,4]) -> (f32[1,2,3], f32[1,2,3]) {
%p0 = (f32[2,2]{0,1}, f32[42,2,3]{0,1,2}) parameter(0)
%p1 = f32[123,4]{0,1} parameter(1)
ROOT %custom-call = (f32[1,2,3]{0,2,1}, f32[1,2,3]{1,2,0}) custom-call((f32[2,2]{0,1}, f32[42,2,3]{0,1,2}) %p0, f32[123,4]{0,1} %p1), custom_call_target="baz", operand_layout_constraints={(f32[2,2]{1,0}, f32[42,2,3]{2,0,1}), f32[123,4]{1,0}}
}
)"
},
{
"CustomCallWithHasSideEffect",
R"(HloModule CustomCallWithHasSideEffect, entry_computation_layout={((f32[2,2]{0,1}, f32[42,2,3]{0,1,2}), f32[123,4]{0,1})->(f32[1,2,3]{0,2,1}, f32[1,2,3]{1,2,0})}
ENTRY %CustomCallWithHasSideEffect (p0: (f32[2,2], f32[42,2,3]), p1: f32[123,4]) -> (f32[1,2,3], f32[1,2,3]) {
%p0 = (f32[2,2]{0,1}, f32[42,2,3]{0,1,2}) parameter(0)
%p1 = f32[123,4]{0,1} parameter(1)
ROOT %custom-call = (f32[1,2,3]{0,2,1}, f32[1,2,3]{1,2,0}) custom-call((f32[2,2]{0,1}, f32[42,2,3]{0,1,2}) %p0, f32[123,4]{0,1} %p1), custom_call_target="baz", custom_call_has_side_effect=true
}
)"
},
{
"CustomCallWithAliasing",
R"(HloModule CustomCallWithAliasing, entry_computation_layout={((f32[2,2]{0,1}, f32[42,2,3]{0,1,2}), f32[123,4]{0,1})->(f32[123,4]{0,1}, f32[2,2]{0,1}, f32[1,2,3]{0,1,2})}
ENTRY %CustomCallWithAliasing (p0: (f32[2,2], f32[42,2,3]), p1: f32[123,4]) -> (f32[123,4], f32[2,2], f32[1,2,3]) {
%p0 = (f32[2,2]{0,1}, f32[42,2,3]{0,1,2}) parameter(0)
%p1 = f32[123,4]{0,1} parameter(1)
ROOT %custom-call = (f32[123,4]{0,1}, f32[2,2]{0,1}, f32[1,2,3]{0,1,2}) custom-call((f32[2,2]{0,1}, f32[42,2,3]{0,1,2}) %p0, f32[123,4]{0,1} %p1), custom_call_target="baz", output_to_operand_aliasing={{0}: (1, {}), {1}: (0, {0})}
}
)"
},
{
"CustomCallWithSchedule",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
%custom-call.0 = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo", schedule=SCHEDULE_EARLIEST
ROOT %custom-call.1 = f32[1,2,3]{0,2,1} custom-call(f32[1,2,3]{0,2,1} %custom-call.0), custom_call_target="bar", schedule=SCHEDULE_LATEST
}
)"
},
{
"CustomCallWithStatusReturningVersion",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call.1 = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo", api_version=API_VERSION_STATUS_RETURNING
}
)"
},
{
"ParseC64Literal",
R"(HloModule ParseC64Literal, entry_computation_layout={()->c64[2]{0}}
ENTRY %ParseC64Literal () -> c64[2] {
ROOT %c = c64[2]{0} constant({(1, 2), (-inf, nan)})
}
)"
},
{
"ParseC128Literal",
R"(HloModule ParseC128Literal, entry_computation_layout={()->c128[2]{0}}
ENTRY %ParseC128Literal () -> c128[2] {
ROOT %c = c128[2]{0} constant({(1, 2), (-inf, nan)})
}
)"
},
{
"IndexedConditional",
R"(HloModule indexed_conditional, entry_computation_layout={()->f32[]}
%Negate (x: f32[]) -> f32[] {
%x = f32[] parameter(0)
ROOT %negate = f32[] negate(f32[] %x)
}
%Identity (y: f32[]) -> f32[] {
%y = f32[] parameter(0)
ROOT %copy = f32[] copy(f32[] %y)
}
%Floor (z: f32[]) -> f32[] {
%z = f32[] parameter(0)
ROOT %floor = f32[] floor(f32[] %z)
}
ENTRY %Parameters1.v4 () -> f32[] {
%constant = s32[] constant(1)
%constant.1 = f32[] constant(56)
%constant.2 = f32[] constant(12)
%constant.3 = f32[] constant(13)
ROOT %conditional = f32[] conditional(s32[] %constant, f32[] %constant.1, f32[] %constant.2, f32[] %constant.3), branch_computations={%Negate, %Identity, %Floor}
}
)"
},
{
"RngGetAndUpdateState",
R"(HloModule rng_get_and_update_state, entry_computation_layout={()->u64[2]{0}}
ENTRY %RngGetAndUpdateState () -> u64[2] {
ROOT %rng-get-and-update-state = u64[2]{0} rng-get-and-update-state(), delta=4096
}
)"
},
{
"RngBitGenerator",
R"(HloModule gng_bit_generator, entry_computation_layout={(u64[2]{0})->(u64[2]{0}, u32[11,17]{1,0})}
ENTRY %RngBitGenerator (p0: u64[2]) -> (u64[2], u32[11,17]) {
%p0 = u64[2]{0} parameter(0)
ROOT %rand = (u64[2]{0}, u32[11,17]{1,0}) rng-bit-generator(u64[2]{0} %p0), algorithm=rng_three_fry
}
)"
},
{
"AsyncOpsWithSyntaxSugar",
R"(HloModule AsyncOpsWithSyntaxSugar, entry_computation_layout={(f32[10]{0})->f32[20]{0}}
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%async-start = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-start(f32[10]{0} %p0), custom_call_target="foo"
%async-update = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-update(((f32[10]{0}), f32[20]{0}, s32[]) %async-start)
ROOT %async-done = f32[20]{0} custom-call-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-update)
}
)"
},
{
"AsyncOpsWithSyntaxSugarAndThreadName",
R"(HloModule AsyncOpsWithSyntaxSugarAndThreadName, entry_computation_layout={(f32[10]{0})->f32[20]{0}}
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%async-start = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-start(f32[10]{0} %p0), async_execution_thread="parallel_thread", custom_call_target="foo"
%async-update = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-update(((f32[10]{0}), f32[20]{0}, s32[]) %async-start)
ROOT %async-done = f32[20]{0} custom-call-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-update)
}
)"
},
{
"HloComputationWithParallelThreadName",
R"(HloModule HloComputationWithParallelThreadName, entry_computation_layout={(f32[10]{0})->f32[20]{0}}
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%async-start = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-start(f32[10]{0} %p0), async_execution_thread="parallel_thread", custom_call_target="foo"
%async-update = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-update(((f32[10]{0}), f32[20]{0}, s32[]) %async-start)
ROOT %async-done = f32[20]{0} custom-call-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-update)
}, execution_thread="main_thread"
)"
},
{
"MetadataFields",
R"(HloModule test, entry_computation_layout={(f32[100]{0})->u32[100]{0}}
ENTRY %test (p: f32[100]) -> u32[100] {
%p = f32[100]{0} parameter(0)
ROOT %root = u32[100]{0} bitcast-convert(f32[100]{0} %p), metadata={op_type="a" op_name="b" source_file="c" source_line=1 profile_type={1} deduplicated_name="d" scheduling_name="foo"}
}
)"
},
{
"MetadataPreserveLayout",
R"(HloModule test, entry_computation_layout={(f32[100]{0})->u32[100]{0}}
ENTRY %test (p: f32[100]) -> u32[100] {
%p = f32[100]{0} parameter(0)
ROOT %root = u32[100]{0} bitcast-convert(f32[100]{0} %p), metadata={op_type="a" op_name="b" source_file="c" source_line=1 profile_type={1} deduplicated_name="d" preserve_layout=true}
}
)"
},
{
"OriginalValue",
R"(HloModule test, entry_computation_layout={(f32[], f32[3]{0}, f32[2,3]{1,0})->((f32[], f32[3]{0}), f32[2,3]{1,0})}
ENTRY %test (v1: f32[], v2: f32[3], v3: f32[2,3]) -> ((f32[], f32[3]), f32[2,3]) {
%v1 = f32[] parameter(0), origin={{"v1"}}
%v2 = f32[3]{0} parameter(1), origin={{"v2"}}
%tuple = (f32[], f32[3]{0}) tuple(f32[] %v1, f32[3]{0} %v2), origin={({"v1"}, {"v2"})}
%v3 = f32[2,3]{1,0} parameter(2), origin={{"v3"}}
ROOT %nested_tuple = ((f32[], f32[3]{0}), f32[2,3]{1,0}) tuple((f32[], f32[3]{0}) %tuple, f32[2,3]{1,0} %v3), origin={(({"v1"}, {"v2"}), {"v3"})}
}
)"
},
});
}
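// Test cases printed back in short parsable form; these are expected to
// round-trip exactly through HloPrintOptions::ShortParsable().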
std::vector<TestData> CreateShortTestCases() {
return std::vector<TestData>({
{
"Map",
R"(HloModule MapBinaryAdder_module, entry_computation_layout={(f32[4]{0}, f32[4]{0})->f32[4]{0}}
add_F32.v3 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY MapBinaryAdder.v3 {
param0 = f32[4]{0} parameter(0)
param1 = f32[4]{0} parameter(1)
ROOT map = f32[4]{0} map(param0, param1), dimensions={0}, to_apply=add_F32.v3
}
)"
},
{
"Reduce",
R"(HloModule ReduceR3ToR2_module, entry_computation_layout={(f32[8,16,256]{2,1,0})->f32[8,16]{1,0}}
add_F32.v3 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY ReduceR3ToR2.v3 {
input = f32[8,16,256]{2,1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[8,16]{1,0} reduce(input, constant), dimensions={2}, to_apply=add_F32.v3
}
)"
},
{
"TupleReduce",
R"(HloModule TupleReduce, entry_computation_layout={(f32[1024]{0}, s32[1024]{0})->(f32[], s32[])}
max_argmax {
value = f32[] parameter(2)
prev_max = f32[] parameter(0)
is_next_larger = pred[] compare(value, prev_max), direction=GE
max = f32[] select(is_next_larger, value, prev_max)
index = s32[] parameter(3)
prev_argmax = s32[] parameter(1)
argmax = s32[] select(is_next_larger, index, prev_argmax)
ROOT pair = (f32[], s32[]) tuple(max, argmax)
}
ENTRY reduce_entry {
values = f32[1024]{0} parameter(0)
indices = s32[1024]{0} parameter(1)
init_value = f32[] constant(-inf)
init_index = s32[] constant(-1)
ROOT result = (f32[], s32[]) reduce(values, indices, init_value, init_index), dimensions={0}, to_apply=max_argmax
}
)"
},
{
"InfeedOutfeed",
R"(HloModule outfeed_module, entry_computation_layout={()->((u32[3]{0}, pred[]), token[])}
ENTRY InfeedToOutfeed {
token0 = token[] after-all()
infeed = ((u32[3]{0}, pred[]), token[]) infeed(token0)
infeed.data = (u32[3]{0}, pred[]) get-tuple-element(infeed), index=0
outfeed = token[] outfeed(infeed.data, token0), outfeed_shape=(u32[3]{0}, pred[])
ROOT infeed.1 = ((u32[3]{0}, pred[]), token[]) infeed(token0)
infeed.1.data = (u32[3]{0}, pred[]) get-tuple-element(infeed.1), index=0
infeed.1.token = token[] get-tuple-element(infeed.1), index=1
outfeed.1 = token[] outfeed(infeed.1.data, infeed.1.token), outfeed_shape=(u32[3]{0}, pred[])
}
)"
},
{
"Rng",
R"(HloModule rng_module, entry_computation_layout={()->f32[8]{0}}
ENTRY Rng {
constant = f32[] constant(0)
constant.1 = f32[] constant(1)
ROOT rng = f32[8]{0} rng(constant, constant.1), distribution=rng_uniform
}
)"
},
{
"ReducePrecision",
R"(HloModule reduce_precision, entry_computation_layout={()->f32[1]{0}}
ENTRY ReducePrecision {
constant = f32[1]{0} constant({3.14159})
ROOT reduce-precision = f32[1]{0} reduce-precision(constant), exponent_bits=8, mantissa_bits=10
}
)"
},
{
"SortKey",
R"(HloModule sort, entry_computation_layout={(f32[1024]{0})->f32[1024]{0}}
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY Sort {
x = f32[1024]{0} parameter(0)
ROOT sorted = f32[1024]{0} sort(x), dimensions={0}, to_apply=compare
}
)"
},
{
"SortKeyValue",
R"(HloModule sort, entry_computation_layout={(f32[1024]{0}, s32[1024]{0})->(f32[1024]{0}, s32[1024]{0})}
compare {
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY Sort {
keys = f32[1024]{0} parameter(0)
values = s32[1024]{0} parameter(1)
ROOT sorted = (f32[1024]{0}, s32[1024]{0}) sort(keys, values), dimensions={0}, to_apply=compare
}
)"
},
{
"SortKeyR2",
R"(HloModule sort, entry_computation_layout={(f32[1024,16]{0,1})->f32[1024,16]{0,1}}
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY Sort {
x = f32[1024,16]{0,1} parameter(0)
ROOT sorted = f32[1024,16]{0,1} sort(x), dimensions={0}, to_apply=compare
}
)"
},
{
"SortKeyValueR2",
R"(HloModule sort, entry_computation_layout={(f32[1024,16]{0,1}, s32[1024,16]{0,1})->(f32[1024,16]{0,1}, s32[1024,16]{0,1})}
compare {
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY Sort {
keys = f32[1024,16]{0,1} parameter(0)
values = s32[1024,16]{0,1} parameter(1)
ROOT sorted = (f32[1024,16]{0,1}, s32[1024,16]{0,1}) sort(keys, values), dimensions={0}, to_apply=compare
}
)"
},
{
"SortManyValues",
R"(HloModule sort, entry_computation_layout={(f32[1024,16]{0,1}, s32[1024,16]{0,1}, u32[1024,16]{0,1}, f32[1024,16]{0,1})->(f32[1024,16]{0,1}, s32[1024,16]{0,1}, u32[1024,16]{0,1}, f32[1024,16]{0,1})}
compare {
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
p.2.lhs = u32[] parameter(4)
p.2.rhs = u32[] parameter(5)
p.3.lhs = f32[] parameter(6)
p.3.rhs = f32[] parameter(7)
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY Sort {
keys = f32[1024,16]{0,1} parameter(0)
values.0 = s32[1024,16]{0,1} parameter(1)
values.1 = u32[1024,16]{0,1} parameter(2)
values.2 = f32[1024,16]{0,1} parameter(3)
ROOT sorted = (f32[1024,16]{0,1}, s32[1024,16]{0,1}, u32[1024,16]{0,1}, f32[1024,16]{0,1}) sort(keys, values.0, values.1, values.2), dimensions={0}, to_apply=compare
}
)"
},
{
"SortKeyStable",
R"(HloModule sort, entry_computation_layout={(f32[1024]{0})->f32[1024]{0}}
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY Sort {
x = f32[1024]{0} parameter(0)
ROOT sorted = f32[1024]{0} sort(x), dimensions={0}, is_stable=true, to_apply=compare
}
)"
},
{
"TopK",
R"(HloModule topk, entry_computation_layout={(f32[10,10]{0,1})->(f32[10,2]{0,1}, s32[10,2]{0,1})}
ENTRY TopK {
x = f32[10,10]{0,1} parameter(0)
ROOT topk = (f32[10,2]{0,1}, s32[10,2]{0,1}) topk(x), k=2, largest=true
}
)"
},
{
"IndexedConditional",
R"(HloModule indexed_conditional, entry_computation_layout={()->f32[]}
Negate {
x = f32[] parameter(0)
ROOT negate = f32[] negate(x)
}
Identity {
y = f32[] parameter(0)
ROOT copy = f32[] copy(y)
}
Floor {
z = f32[] parameter(0)
ROOT floor = f32[] floor(z)
}
ENTRY Parameters1.v4 {
constant = s32[] constant(1)
constant.1 = f32[] constant(56)
constant.2 = f32[] constant(12)
constant.3 = f32[] constant(13)
ROOT conditional = f32[] conditional(constant, constant.1, constant.2, constant.3), branch_computations={Negate, Identity, Floor}
}
)"
},
{
"PredicatedConditional",
R"(HloModule pred_conditional, entry_computation_layout={()->f32[]}
Negate {
x = f32[] parameter(0)
ROOT negate = f32[] negate(x)
}
Identity {
y = f32[] parameter(0)
ROOT copy = f32[] copy(y)
}
ENTRY Parameters1.v4 {
constant = pred[] constant(true)
constant.1 = f32[] constant(56)
constant.2 = f32[] constant(12)
ROOT conditional = f32[] conditional(constant, constant.1, constant.2), true_computation=Negate, false_computation=Identity
}
)"
},
{
"CustomCall",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY CustomCall {
constant = f32[1]{0} constant({12345})
ROOT custom-call = f32[1,2,3]{0,2,1} custom-call(constant), custom_call_target="foo\"bar"
}
)"
},
{
"CustumCallSingleComp",
R"(HloModule custom_call_with_comp, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
max_F32 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT maximum = f32[] maximum(lhs, rhs)
}
ENTRY CustomCall {
constant = f32[1]{0} constant({12345})
ROOT custom-call = f32[1,2,3]{0,2,1} custom-call(constant), custom_call_target="foo\"bar", called_computations={max_F32}
}
)"
},
{
"CustumCallMultipleComps",
R"(HloModule custom_call_with_comps, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
max_F32 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT maximum = f32[] maximum(lhs, rhs)
}
ENTRY CustomCall {
constant = f32[1]{0} constant({12345})
ROOT custom-call = f32[1,2,3]{0,2,1} custom-call(constant), custom_call_target="foo\"bar", called_computations={max_F32, max_F32}
}
)"
},
{
"NonDefaultNames",
R"(HloModule add_constants_module, entry_computation_layout={()->f32[]}
ENTRY add_constants {
foo = f32[] constant(3.14)
ROOT bar = f32[] add(foo, foo)
}
)"
},
{
"Dot",
R"(HloModule dot, entry_computation_layout={(f32[2,10]{1,0}, f32[10,2]{1,0})->f32[2]{0}}
ENTRY dot {
a = f32[2,10]{1,0} parameter(0)
b = f32[10,2]{1,0} parameter(1)
ROOT dot = f32[2]{0} dot(a, b), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={1}, rhs_contracting_dims={0}
}
)"
},
{
"DotSparseOperand",
R"(HloModule dot, entry_computation_layout={(f16[32,32]{1,0}, f16[64,32]{1,0}, u16[32,4]{1,0})->f16[32,32]{1,0}}
ENTRY dot {
a = f16[32,32]{1,0} parameter(0)
b = f16[64,32]{1,0} parameter(1)
meta = u16[32,4]{1,0} parameter(2)
ROOT dot = f16[32,32]{1,0} dot(a, b, meta), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4
}
)"
},
{
"DotSparseOperands",
R"(HloModule dot, entry_computation_layout={(f16[32,32]{1,0}, f16[32,32]{1,0}, u16[32,4]{1,0}, u16[4,32]{1,0})->f16[32,32]{1,0}}
ENTRY dot {
a = f16[32,32]{1,0} parameter(0)
b = f16[32,32]{1,0} parameter(1)
a_meta = u16[32,4]{1,0} parameter(2)
b_meta = u16[4,32]{1,0} parameter(3)
ROOT dot = f16[32,32]{1,0} dot(a, b, a_meta, b_meta), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4_R.0@2:4
}
)"
},
{
"DotWithAlgorithm",
R"(HloModule dot, entry_computation_layout={(f32[2,10]{1,0}, f32[10,2]{1,0})->f32[2]{0}}
ENTRY dot {
a = f32[2,10]{1,0} parameter(0)
b = f32[10,2]{1,0} parameter(1)
ROOT dot = f32[2]{0} dot(a, b), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={1}, rhs_contracting_dims={0}, algorithm=dot_tf32_tf32_f32
}
)"
},
{
"gather",
R"(HloModule gather, entry_computation_layout={(f32[50,49,48,47,46]{4,3,2,1,0}, s64[10,9,8,7,5]{4,3,2,1,0})->f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0}}
ENTRY Gather {
input_tensor = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0)
start_indices = s64[10,9,8,7,5]{4,3,2,1,0} parameter(1)
ROOT gather = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} gather(input_tensor, start_indices), offset_dims={4,5,6,7,8}, collapsed_slice_dims={}, start_index_map={0,1,2,3,4}, index_vector_dim=4, slice_sizes={30,29,28,27,26}
}
)"
},
{
"AllReduce",
R"(HloModule CRS, entry_computation_layout={(f32[8]{0})->f32[8]{0}}
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
ROOT crs = f32[8]{0} all-reduce(input), replica_groups={}, to_apply=add
}
)"
},
{
"AllReduceWithSubgroups",
R"(HloModule CRS_Subgroups, entry_computation_layout={(f32[128,32]{0,1})->f32[128,32]{0,1}}, replica_count=4
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY AllReduceWithSubgroups {
input = f32[128,32]{0,1} parameter(0)
ROOT all-reduce = f32[128,32]{0,1} all-reduce(input), replica_groups={{0,1},{2,3}}, to_apply=add
}
)",
4,
},
{
"AllReduceWithSubgroupsIotaList",
R"(HloModule CRS_Subgroups, entry_computation_layout={(f32[128,32]{0,1})->f32[128,32]{0,1}}, replica_count=20
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY AllReduceWithSubgroupsIotaList {
input = f32[128,32]{0,1} parameter(0)
ROOT all-reduce = f32[128,32]{0,1} all-reduce(input), replica_groups=[2,10]<=[20], to_apply=add
}
)",
20,
},
{
"AllReduceWithLayout",
R"(HloModule CRS, entry_computation_layout={(f32[8]{0})->f32[8]{0}}
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
ROOT crs = f32[8]{0} all-reduce(input), replica_groups={}, constrain_layout=true, to_apply=add
}
)"
},
{
"AllReduceAllReduce",
R"(HloModule CRS, entry_computation_layout={(f32[8]{0})->f32[8]{0}}
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
crs.1 = f32[8]{0} all-reduce(input), channel_id=1, replica_groups={{0}}, to_apply=add
ROOT crs.0 = f32[8]{0} all-reduce(input), channel_id=1, replica_groups={{0}}, to_apply=add
}
)"
},
{
"AllReduceStartAndDone",
R"(HloModule CRS, entry_computation_layout={(f32[8]{0})->f32[8]{0}}
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
crs = f32[8]{0} all-reduce-start(input), replica_groups={}, to_apply=add
ROOT done = f32[8]{0} all-reduce-done(crs)
}
)"
},
{
"ReduceScatter",
R"(HloModule RS, entry_computation_layout={(f32[8]{0})->f32[4]{0}}
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
ROOT ars = f32[4]{0} reduce-scatter(input), replica_groups={{0,1}}, dimensions={0}, to_apply=add
}
)"
},
{
"AllGather",
R"(HloModule AllGather, entry_computation_layout={(f32[128,32]{0,1})->f32[128,128]{0,1}}
ENTRY AllGather {
input = f32[128,32]{0,1} parameter(0)
ROOT ag = f32[128,128]{0,1} all-gather(input), replica_groups={}, dimensions={1}
}
)"
},
{
"AllGatherWithLayout",
R"(HloModule AllGather, entry_computation_layout={(f32[128,32]{0,1})->f32[128,128]{0,1}}
ENTRY AllGather {
input = f32[128,32]{0,1} parameter(0)
ROOT ag = f32[128,128]{0,1} all-gather(input), replica_groups={}, constrain_layout=true, dimensions={1}
}
)"
},
{
"AllGatherWithSubgroups",
R"(HloModule AllGatherWithSubgroups, entry_computation_layout={(f32[128,32]{0,1})->f32[128,64]{0,1}}, replica_count=4
ENTRY AllGatherWithSubgroups {
input = f32[128,32]{0,1} parameter(0)
ROOT ag = f32[128,64]{0,1} all-gather(input), replica_groups={{0,1},{2,3}}, dimensions={1}
}
)",
4,
},
{
"AllGatherWithSubgroupsIotaList",
R"(HloModule AllGatherWithSubgroupsIotaList, entry_computation_layout={(f32[128,32]{0,1})->f32[128,320]{0,1}}, replica_count=30
ENTRY AllGatherWithSubgroupsIotaList {
input = f32[128,32]{0,1} parameter(0)
ROOT ag = f32[128,320]{0,1} all-gather(input), replica_groups=[3,10]<=[6,5]T(1,0), dimensions={1}
}
)",
30,
},
{
"AllToAll",
R"(HloModule AllToAll, entry_computation_layout={(f32[128,32]{0,1})->(f32[128,32]{0,1})}
ENTRY AllToAll {
input = f32[128,32]{0,1} parameter(0)
ROOT a2a = (f32[128,32]{0,1}) all-to-all(input), replica_groups={}
}
)"
},
{
"AllToAllWithSubgroups",
R"(HloModule AllToAllWithSubgroups, entry_computation_layout={(f32[128,32]{0,1}, f32[128,32]{0,1})->(f32[128,32]{0,1}, f32[128,32]{0,1})}, replica_count=4
ENTRY AllToAllWithSubgroups {
p0 = f32[128,32]{0,1} parameter(0)
p1 = f32[128,32]{0,1} parameter(1)
ROOT a2a = (f32[128,32]{0,1}, f32[128,32]{0,1}) all-to-all(p0, p1), replica_groups={{1,2},{3,0}}
}
)",
4,
},
{
"AllToAllWithSubgroupsIotaList",
R"(HloModule AllToAllWithSubgroupsIotaList, entry_computation_layout={(f32[128,32]{0,1})->f32[128,32]{0,1}}, replica_count=32
ENTRY AllToAllWithSubgroupsIotaList {
p0 = f32[128,32]{0,1} parameter(0)
ROOT a2a = f32[128,32]{0,1} all-to-all(p0), replica_groups=[4,8]<=[4,8]T(1,0), dimensions={0}
}
)",
32
},
{
"CollectiveBroadcast",
R"(HloModule CollectiveBroadcast, entry_computation_layout={(f32[128,32]{0,1})->f32[128,32]{0,1}}, replica_count=4
ENTRY CollectiveBroadcast {
input = f32[128,32]{0,1} parameter(0)
ROOT cb = f32[128,32]{0,1} collective-broadcast(input), replica_groups={{1,0},{2,3}}
}
)",
4,
},
{
"CollectivePermute",
R"(HloModule CollectivePermute, entry_computation_layout={(f32[128,32]{0,1})->f32[128,32]{0,1}}, replica_count=4
ENTRY CollectivePermute {
input = f32[128,32]{0,1} parameter(0)
ROOT root = f32[128,32]{0,1} collective-permute(input), source_target_pairs={{0,1},{1,2},{2,3}}
}
)",
4
},
{
"CollectivePermuteInPlaceUpdate",
R"(HloModule CollectivePermuteInPlaceUpdate, entry_computation_layout={(f32[128,32]{0,1})->f32[128,128]{0,1}}, replica_count=4
ENTRY CollectivePermuteInPlaceUpdate {
input = f32[128,32]{0,1} parameter(0)
constant = f32[] constant(1)
output = f32[128,128]{0,1} broadcast(constant), dimensions={}
constant.1 = s32[] constant(0)
tuple.1 = (s32[], s32[]) tuple(constant.1, constant.1)
constant.2 = s32[] constant(64)
tuple.2 = (s32[], s32[]) tuple(constant.1, constant.2)
ROOT root = f32[128,128]{0,1} collective-permute(input, output, tuple.1, tuple.2), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{128,32}}
}
)",
4
},
{
"CollectivePermuteInPlaceUpdateMultipleReadWrite",
R"(HloModule CollectivePermuteInPlaceUpdateMultipleReadWrite, entry_computation_layout={(f32[8,8,128]{2,1,0})->f32[8,8,128]{2,1,0}}, replica_count=4
ENTRY CollectivePermuteInPlaceUpdate {
constant.3 = s32[] constant(2)
constant.1 = s32[] constant(0)
output_offset.3 = (s32[], s32[], s32[]) tuple(constant.3, constant.1, constant.1)
constant.4 = s32[] constant(3)
output_offset.4 = (s32[], s32[], s32[]) tuple(constant.4, constant.1, constant.1)
input = f32[8,8,128]{2,1,0} parameter(0)
constant = f32[] constant(1)
output = f32[8,8,128]{2,1,0} broadcast(constant), dimensions={}
input_offset.1 = (s32[], s32[], s32[]) tuple(constant.1, constant.1, constant.1)
constant.2 = s32[] constant(1)
input_offset.2 = (s32[], s32[], s32[]) tuple(constant.2, constant.1, constant.1)
input_offset = ((s32[], s32[], s32[]), (s32[], s32[], s32[])) tuple(input_offset.1, input_offset.2)
output_offset = ((s32[], s32[], s32[]), (s32[], s32[], s32[])) tuple(input_offset.1, input_offset.2)
ROOT root = f32[8,8,128]{2,1,0} collective-permute(input, output, input_offset, output_offset), source_target_pairs={{0,1},{1,2},{2,3},{0,3},{2,1},{3,2}}, slice_sizes={{1,8,128},{1,8,128}}
}
)",
4
},
{
"CollectivePermuteInPlaceUpdateTupleMultipleReadWrite",
R"(HloModule hlo_runner_test_0.1, entry_computation_layout={()->(u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)})}, replica_count=4
ENTRY hlo_runner_test_0.1 {
replica_id = u32[] replica-id()
broadcast.0 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(replica_id), dimensions={}
tuple.input = (u32[2,8,128]{2,1,0:T(2,128)}, u32[2,8,128]{2,1,0:T(2,128)}) tuple(broadcast.0, broadcast.0)
constant.1 = u32[] constant(1000)
broadcast.1 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(constant.1), dimensions={}
broadcast.2 = u32[4,8,128]{2,1,0:T(2,128)} broadcast(constant.1), dimensions={}
tuple.output = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) tuple(broadcast.1, broadcast.2)
constant.2 = s32[] constant(0)
tuple.2 = (s32[], s32[], s32[]) tuple(constant.2, constant.2, constant.2)
constant.3 = s32[] constant(1)
tuple.3 = (s32[], s32[], s32[]) tuple(constant.3, constant.2, constant.2)
tuple.4 = ((s32[], s32[], s32[]), (s32[], s32[], s32[])) tuple(tuple.2, tuple.3)
tuple.7 = ((s32[], s32[], s32[]), (s32[], s32[], s32[])) tuple(tuple.2, tuple.2)
tuple.8 = (((s32[], s32[], s32[]), (s32[], s32[], s32[])), ((s32[], s32[], s32[]), (s32[], s32[], s32[]))) tuple(tuple.4, tuple.7)
constant.4 = s32[] constant(2)
tuple.5 = (s32[], s32[], s32[]) tuple(constant.4, constant.2, constant.2)
tuple.6 = ((s32[], s32[], s32[]), (s32[], s32[], s32[])) tuple(tuple.2, tuple.5)
tuple.9 = (((s32[], s32[], s32[]), (s32[], s32[], s32[])), ((s32[], s32[], s32[]), (s32[], s32[], s32[]))) tuple(tuple.4, tuple.6)
ROOT collective-permute.53 = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) collective-permute(tuple.input, tuple.output, tuple.8, tuple.9), source_target_pairs={{0,1},{1,2},{2,3},{3,0},{0,3},{3,2},{2,1},{1,0}}, slice_sizes={{1,8,128},{1,8,128},{2,8,128},{2,8,128}}
}
)",
4
},
{
"CollectivePermuteTupleInPlaceUpdate",
R"(HloModule CollectivePermuteTupleInPlaceUpdate, entry_computation_layout={(f32[128,32]{0,1})->(f32[128,128]{0,1}, f32[128,128]{0,1})}, replica_count=4
ENTRY CollectivePermuteInPlaceUpdate {
input = f32[128,32]{0,1} parameter(0)
tuple.input = (f32[128,32]{0,1}, f32[128,32]{0,1}) tuple(input, input)
constant = f32[] constant(1)
output = f32[128,128]{0,1} broadcast(constant), dimensions={}
tuple.output = (f32[128,128]{0,1}, f32[128,128]{0,1}) tuple(output, output)
constant.1 = s32[] constant(0)
tuple.1 = (s32[], s32[]) tuple(constant.1, constant.1)
constant.2 = s32[] constant(64)
tuple.2 = (s32[], s32[]) tuple(constant.2, constant.1)
tuple.3 = ((s32[], s32[]), (s32[], s32[])) tuple(tuple.1, tuple.2)
tuple.4 = (s32[], s32[]) tuple(constant.1, constant.1)
tuple.5 = (s32[], s32[]) tuple(constant.2, constant.2)
tuple.6 = ((s32[], s32[]), (s32[], s32[])) tuple(tuple.4, tuple.5)
ROOT root = (f32[128,128]{0,1}, f32[128,128]{0,1}) collective-permute(tuple.input, tuple.output, tuple.3, tuple.6), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{64,32},{64,32}}
}
)",
4
},
{
"CollectivePermuteStartAndDone",
R"(HloModule CollectivePermuteStartAndDone, entry_computation_layout={(f32[128,32]{0,1})->f32[128,32]{0,1}}, replica_count=4
ENTRY CollectivePermuteStartAndDone {
input = f32[128,32]{0,1} parameter(0)
collective-permute-start.1 = (f32[128,32]{0,1}, f32[128,32]{0,1}, u32[], u32[]) collective-permute-start(input), source_target_pairs={{0,1},{1,2},{2,3}}
ROOT collective-permute-done.1 = f32[128,32]{0,1} collective-permute-done(collective-permute-start.1)
}
)",
4
},
{
"CollectivePermuteStartAndDoneInplaceUpdate",
R"(HloModule CollectivePermuteStartAndDoneInplaceUpdate, entry_computation_layout={(f32[128,32]{0,1})->f32[128,128]{0,1}}, replica_count=4
ENTRY CollectivePermuteStartAndDoneInplaceUpdate {
input = f32[128,32]{0,1} parameter(0)
constant = f32[] constant(1)
output = f32[128,128]{0,1} broadcast(constant), dimensions={}
constant.1 = s32[] constant(0)
tuple.1 = (s32[], s32[]) tuple(constant.1, constant.1)
constant.2 = s32[] constant(64)
tuple.2 = (s32[], s32[]) tuple(constant.1, constant.2)
collective-permute-start.1 = (f32[128,32]{0,1}, f32[128,128]{0,1}, u32[], u32[]) collective-permute-start(input, output, tuple.1, tuple.2), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{64,32}}
ROOT collective-permute-done.1 = f32[128,128]{0,1} collective-permute-done(collective-permute-start.1)
}
)",
4
},
{
"ReplicaId",
R"(HloModule replica-id, entry_computation_layout={()->u32[]}
ENTRY Replica-id {
ROOT replica-id = u32[] replica-id()
}
)"
},
{
"PartitionId",
R"(HloModule partition-id, entry_computation_layout={()->u32[]}
ENTRY PartitionId {
ROOT id = u32[] partition-id()
}
)"
},
{
"Iota",
R"(HloModule iota, entry_computation_layout={()->f32[100]{0}}
ENTRY Iota {
ROOT iota = f32[100]{0} iota(), iota_dimension=0
}
)"
},
{
"CustomCallWithWindowAndDimLabelsAndFeatureGroupCount",
R"(HloModule CustomCallWithWindowAndDimLabelsAndFeatureGroupCount, entry_computation_layout={()->f32[100]{0}}
ENTRY Computation {
ROOT r = f32[100]{0} custom-call(), window={size=2x2}, dim_labels=b01f_01io->b01f, feature_group_count=2, custom_call_target="target"
}
)"
},
{
"CustomCallWithUnknownDimLabels",
R"(HloModule CustomCallWithUnknownDimLabels, entry_computation_layout={()->f32[100]{0}}
ENTRY Computation {
ROOT r = f32[100]{0} custom-call(), window={size=2x2}, dim_labels=?b01f_0?1io->b01?f, custom_call_target="target"
}
)"
},
{
"ScheduledModule",
R"(HloModule scheduled_module, is_scheduled=true, entry_computation_layout={(f32[1024]{0}, s32[1024]{0})->(f32[1024]{0}, s32[1024]{0})}
compare {
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
ROOT lhs = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY Sort {
keys = f32[1024]{0} parameter(0)
values = s32[1024]{0} parameter(1)
ROOT sorted = (f32[1024]{0}, s32[1024]{0}) sort(keys, values), dimensions={0}, to_apply=compare
}
)"
},
{
"AfterAllWithMultipleOperands",
R"(HloModule AfterAllWithMultipleOperands, entry_computation_layout={(f32[])->token[]}
ENTRY AfterAllWithMultipleOperands {
p0 = f32[] parameter(0)
token0 = token[] after-all()
token1 = token[] after-all()
ROOT after-all = token[] after-all(p0, token0, token1)
}
)"
},
{
"AddDependency",
R"(HloModule AddDependency, entry_computation_layout={(f32[])->f32[]}
ENTRY AddDependency {
p = f32[] parameter(0)
neg = f32[] negate(p)
token0 = token[] after-all(neg)
p_after_token = f32[] add-dependency(p, token0)
exp = f32[] exponential(p_after_token)
ROOT sum = f32[] add(neg, exp)
}
)"
},
{
"MinMaxValues",
R"(HloModule MinMaxValues, entry_computation_layout={()->c128[2]{0}}
ENTRY MinMaxValues {
x.s4 = s4[2]{0} constant({-8, 7})
x.s8 = s8[2]{0} constant({-128, 127})
x.s16 = s16[2]{0} constant({-32768, 32767})
x.s32 = s32[2]{0} constant({-2147483648, 2147483647})
x.u4 = u4[2]{0} constant({0, 15})
x.u8 = u8[2]{0} constant({0, 255})
x.u16 = u16[2]{0} constant({0, 65535})
x.u32 = u32[2]{0} constant({0, 4294967295})
x.f16 = f16[2]{0} constant({-65504, 65504})
x.bf16 = bf16[2]{0} constant({-3.39e+38, 3.39e+38})
x.f32 = f32[2]{0} constant({-3.40282e+38, 3.40282e+38})
x.f64 = f64[2]{0} constant({-1.79769e+308, 1.79769e+308})
x.c64 = c64[2]{0} constant({(-3.40282e+38, 3.40282e+38), (3.40282e+38, -3.40282e+38)})
ROOT c.c128 = c128[2]{0} constant({(-1.79769e+308, 1.79769e+308), (1.79769e+308, -1.79769e+308)})
}
)"
},
{
"BitcastConvert",
R"(HloModule BitcastConvert, entry_computation_layout={(f32[100]{0})->u32[100]{0}}
ENTRY BitcastConvertUsage {
p = f32[100]{0} parameter(0)
ROOT out = u32[100]{0} bitcast-convert(p)
}
)"
},
});
}
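// Test cases that are not expected to round-trip verbatim: the parsed module
// prints back as a different, canonicalized string (e.g. anonymous operands
// receive generated names such as "parameter.anon").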
std::vector<NonRoundtripTestData> CreateNonRoundtripTestCases() {
return std::vector<NonRoundtripTestData>({
{
"SimpleNesting",
R"(HloModule test
ENTRY test {
ROOT root = add(f32[10] parameter(0), multiply(f32[10] parameter(1), f32[10] parameter(2)))
})",
R"(HloModule test, entry_computation_layout={(f32[10]{0}, f32[10]{0}, f32[10]{0})->f32[10]{0}}
ENTRY test {
parameter.anon = f32[10]{0} parameter(0)
parameter.anon.1 = f32[10]{0} parameter(1)
parameter.anon.2 = f32[10]{0} parameter(2)
multiply.anon = f32[10]{0} multiply(parameter.anon.1, parameter.anon.2)
ROOT root = f32[10]{0} add(parameter.anon, multiply.anon)
})"
},
{
"AmbiguousNames",
R"(HloModule test
ENTRY test {
add = add(f32[10] parameter(0), f32[10] parameter(1))
ROOT add2 = add(add, add(add, add))
})",
R"(HloModule test, entry_computation_layout={(f32[10]{0}, f32[10]{0})->f32[10]{0}}
ENTRY test {
parameter.anon = f32[10]{0} parameter(0)
parameter.anon.1 = f32[10]{0} parameter(1)
add = f32[10]{0} add(parameter.anon, parameter.anon.1)
add.anon = f32[10]{0} add(add, add)
ROOT add2 = f32[10]{0} add(add, add.anon)
})"
},
{
"TupleShapeInsideAnonymousInstr",
R"(HloModule test
ENTRY test {
ROOT root = get-tuple-element(
(f32[10], f16[10]) tuple(f32[10] parameter(0), f16[10] parameter(1))
), index=0
})",
R"(HloModule test, entry_computation_layout={(f32[10]{0}, f16[10]{0})->f32[10]{0}}
ENTRY test {
parameter.anon = f32[10]{0} parameter(0)
parameter.anon.1 = f16[10]{0} parameter(1)
tuple.anon = (f32[10]{0}, f16[10]{0}) tuple(parameter.anon, parameter.anon.1)
ROOT root = f32[10]{0} get-tuple-element(tuple.anon), index=0
})"
},
{
"MixAnonAndNonAnonOperands",
R"(HloModule test
ENTRY test {
add = add(f32[10] parameter(0), f32[10] parameter(1))
ROOT root = tuple(add, add(add, add), add)
})",
R"(HloModule test, entry_computation_layout={(f32[10]{0}, f32[10]{0})->(f32[10]{0}, f32[10]{0}, f32[10]{0})}
ENTRY test {
parameter.anon = f32[10]{0} parameter(0)
parameter.anon.1 = f32[10]{0} parameter(1)
add = f32[10]{0} add(parameter.anon, parameter.anon.1)
add.anon = f32[10]{0} add(add, add)
ROOT root = (f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(add, add.anon, add)
})"
},
{
"BroadcastOfScalarDoesntNeedDimensionsAttr",
R"(HloModule test
ENTRY test {
ROOT root = sqrt(f32[10,10] broadcast(f32[] parameter(0)))
})",
R"(HloModule test, entry_computation_layout={(f32[])->f32[10,10]{1,0}}
ENTRY test {
parameter.anon = f32[] parameter(0)
broadcast.anon = f32[10,10]{1,0} broadcast(parameter.anon), dimensions={}
ROOT root = f32[10,10]{1,0} sqrt(broadcast.anon)
})"
},
{
"SparseShape",
R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)} parameter(0)
})",
R"(HloModule test, entry_computation_layout={(f32[10,10]{1,0:D(D,C)})->f32[10,10]{1,0:D(D,C)}}
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)} parameter(0)
})",
},
{
"SparseShapeWithIndexPrimitiveType",
R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)#(u32)} parameter(0)
})",
R"(HloModule test, entry_computation_layout={(f32[10,10]{1,0:D(D,C)#(u32)})->f32[10,10]{1,0:D(D,C)#(u32)}}
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)#(u32)} parameter(0)
})",
},
{
"SparseShapeWithPointerPrimitiveType",
R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)*(u32)} parameter(0)
})",
R"(HloModule test, entry_computation_layout={(f32[10,10]{1,0:D(D,C)*(u32)})->f32[10,10]{1,0:D(D,C)*(u32)}}
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)*(u32)} parameter(0)
})",
},
{
"SparseShapeWithPhysicalShape",
R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)P((s32[10]{0:T(100)}, s32[10]{0:T(100)}, f32[10]{0:T(100)}))} parameter(0)
})",
R"(HloModule test, entry_computation_layout={(f32[10,10]{1,0:D(D,C)P((s32[10]{0:T(100)}, s32[10]{0:T(100)}, f32[10]{0:T(100)}))})->f32[10,10]{1,0:D(D,C)P((s32[10]{0:T(100)}, s32[10]{0:T(100)}, f32[10]{0:T(100)}))}}
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)P((s32[10]{0:T(100)}, s32[10]{0:T(100)}, f32[10]{0:T(100)}))} parameter(0)
})",
},
{
"SparseShapeFull",
R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)#(u64)*(u32)S(42)P((s32[10]{0:T(100)}, s32[10]{0:T(100)}, f32[10]{0:T(100)}))} parameter(0)
})",
R"(HloModule test, entry_computation_layout={(f32[10,10]{1,0:D(D,C)#(u64)*(u32)S(42)P((s32[10]{0:T(100)}, s32[10]{0:T(100)}, f32[10]{0:T(100)}))})->f32[10,10]{1,0:D(D,C)#(u64)*(u32)S(42)P((s32[10]{0:T(100)}, s32[10]{0:T(100)}, f32[10]{0:T(100)}))}}
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)#(u64)*(u32)S(42)P((s32[10]{0:T(100)}, s32[10]{0:T(100)}, f32[10]{0:T(100)}))} parameter(0)
})",
},
{
"SparseCOO",
R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:D(C+,S)} parameter(0)
})",
R"(HloModule test, entry_computation_layout={(f32[10,10]{1,0:D(C+,S)})->f32[10,10]{1,0:D(C+,S)}}
ENTRY test {
ROOT root = f32[10,10]{1,0:D(C+,S)} parameter(0)
})",
},
{
"SparseCOOUnordered",
R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:D(C+~,S~)} parameter(0)
})",
R"(HloModule test, entry_computation_layout={(f32[10,10]{1,0:D(C+~,S~)})->f32[10,10]{1,0:D(C+~,S~)}}
ENTRY test {
ROOT root = f32[10,10]{1,0:D(C+~,S~)} parameter(0)
})",
},
});
}
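// Parameterized round-trip test. The template parameters select whether the
// module is printed back in short or long form, and whether it is additionally
// round-tripped through an HloProto before printing.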
template <bool short_form, bool proto_round_trip>
class HloParameterizedParserTest
: public ::testing::Test,
public ::testing::WithParamInterface<TestData> {
protected:
void ExpectEqual() {
std::unique_ptr<HloModule> module;
const std::string& original = GetParam().module_string;
HloModuleConfig config;
config.set_replica_count(GetParam().replica_count);
if (GetParam().enable_verification) {
auto verified_module = std::make_unique<VerifiedHloModule>(
GetParam().test_name, config,
false,
true,
ShapeUtil::ByteSizeOfElements);
TF_ASSERT_OK(verified_module->ParseHloStringAndVerifyModule(original));
module = std::move(verified_module);
} else {
TF_ASSERT_OK_AND_ASSIGN(module,
ParseAndReturnUnverifiedModule(original, config));
}
if (proto_round_trip) {
TF_ASSERT_OK_AND_ASSIGN(module, HloModule::CreateFromProto(
module->ToProto(), module->config()));
}
if (short_form) {
EXPECT_EQ(original, module->ToString(HloPrintOptions::ShortParsable()));
} else {
EXPECT_EQ(
original,
module->ToString(HloPrintOptions().set_print_large_constants(true)));
}
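// Also check that parsed while instructions are linked back to their body
// computations.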
for (HloComputation* computation : module->computations()) {
for (HloInstruction* instr : computation->instructions()) {
if (instr->opcode() == HloOpcode::kWhile) {
EXPECT_EQ(instr->while_body()->WhileCallInstruction(), instr);
EXPECT_TRUE(instr->while_body()->IsWhileBodyComputation());
}
}
}
}
};
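// Concrete instantiations: long vs. short printed form, with and without a
// proto round trip.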
using HloParserTestLong = HloParameterizedParserTest<false, false>;
using HloParserTestLongProto = HloParameterizedParserTest<false, true>;
using HloParserTestShort = HloParameterizedParserTest<true, false>;
using HloParserTestShortProto = HloParameterizedParserTest<true, true>;
TEST_P(HloParserTestLong, Run) { ExpectEqual(); }
TEST_P(HloParserTestLongProto, Run) { ExpectEqual(); }
TEST_P(HloParserTestShort, Run) { ExpectEqual(); }
TEST_P(HloParserTestShortProto, Run) { ExpectEqual(); }
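// Long-form suites consume CreateTestCases(); short-form suites consume
// CreateShortTestCases().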
INSTANTIATE_TEST_SUITE_P(HloParserTestSuccessInstantiation, HloParserTestLong,
::testing::ValuesIn(CreateTestCases()),
TestDataToString);
INSTANTIATE_TEST_SUITE_P(HloParserTestSuccessInstantiation,
HloParserTestLongProto,
::testing::ValuesIn(CreateTestCases()),
TestDataToString);
INSTANTIATE_TEST_SUITE_P(HloParserTestSuccessInstantiation, HloParserTestShort,
::testing::ValuesIn(CreateShortTestCases()),
TestDataToString);
INSTANTIATE_TEST_SUITE_P(HloParserTestSuccessInstantiation,
HloParserTestShortProto,
::testing::ValuesIn(CreateShortTestCases()),
TestDataToString);
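// Tests that parse one module string and expect a different canonical string
// when the module is printed back in short parsable form.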
class HloNonRoundtripParserTest
: public ::testing::TestWithParam<NonRoundtripTestData> {};
TEST_P(HloNonRoundtripParserTest, Run) {
auto module = std::make_unique<VerifiedHloModule>(
GetParam().test_name, HloModuleConfig{},
false,
true,
ShapeUtil::ByteSizeOfElements);
TF_ASSERT_OK(
module->ParseHloStringAndVerifyModule(GetParam().input_module_string));
EXPECT_EQ(absl::StripAsciiWhitespace(GetParam().output_module_string),
absl::StripAsciiWhitespace(
module->ToString(HloPrintOptions::ShortParsable())));
}
INSTANTIATE_TEST_SUITE_P(HloParserTestSuccessInstantiation,
HloNonRoundtripParserTest,
::testing::ValuesIn(CreateNonRoundtripTestCases()),
NonRoundtripTestDataToString);
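// Fixture for standalone parser tests. Provides substring matching on error
// messages and a helper that parses and verifies a module.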
class HloParserTest : public ::testing::Test {
protected:
static void ExpectHasSubstr(string_view s, string_view expected) {
EXPECT_TRUE(absl::StrContains(s, expected))
<< "'" << s << "' does not contain '" << expected << "'";
}
absl::StatusOr<std::unique_ptr<VerifiedHloModule>>
ParseAndReturnVerifiedModule(absl::string_view hlo_text) {
auto module = std::make_unique<VerifiedHloModule>(
::testing::UnitTest::GetInstance()->current_test_info()->name(),
HloModuleConfig(),
false,
true,
ShapeUtil::ByteSizeOfElements);
TF_RETURN_IF_ERROR(module->ParseHloStringAndVerifyModule(hlo_text));
return std::move(module);
}
};
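// The tests below feed individual, often intentionally malformed, modules to
// the parser and check the resulting status and error messages.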
TEST_F(HloParserTest, Empty) {
const std::string original = "";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, Garbage) {
const std::string original =
"HloModule thi$ str1ng makes# N0 sen$e @all!*&^%$";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, WrongOpcode) {
const std::string original = R"(HloModule wrong_opcode:
ENTRY %blabla (x: f32[], y: f32[]) -> f32[] {
%x = f32[]{} parameter(0)
%y = f32[]{} parameter(1)
%le = pred[]{} le(f32[]{} %x, f32[]{} %y)
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, MetadataWithCholesky) {
const std::string original = R"(HloModule metadata_with_cholesky
ENTRY %blabla (a: f32[1,291,291]) -> f32[1,291,291] {
%a = f32[1,291,291] parameter(0)
%out = f32[1,291,291] cholesky(f32[1,291,291] %a), lower=true, metadata={op_type="Cholesky" op_name="Cholesky" profile_type={1}}
}
)";
auto result = ParseAndReturnVerifiedModule(original);
EXPECT_EQ(absl::OkStatus(), result.status());
EXPECT_EQ("Cholesky", result.value()
->entry_computation()
->root_instruction()
->metadata()
.op_name());
EXPECT_EQ("Cholesky", result.value()
->entry_computation()
->root_instruction()
->metadata()
.op_type());
EXPECT_EQ(WINDOW, *result.value()
->entry_computation()
->root_instruction()
->metadata()
.profile_type()
.begin());
}
TEST_F(HloParserTest, WrongShape) {
const std::string original = R"(HloModule wrong_opcode:
ENTRY %blabla (x: g32[]) -> g32[] {
%x = g32[]{} parameter(0)
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, WrongOperandsSize) {
const std::string original = R"(HloModule wrong_opcode:
ENTRY %blabla (x: f32[]) -> pred[] {
%x = f32[]{} parameter(0)
%eq = pred[]{} compare(f32[]{} %x), direction=EQ
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, OperandNotFound) {
const std::string original = R"(HloModule operand_not_found:
ENTRY %blabla (x: f32[]) -> pred[] {
%x = f32[]{} parameter(0)
%eq = pred[]{} compare(f32[]{} %x, f32[]{} %y), direction=EQ
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, MoreConstants) {
const std::string original = R"(HloModule SelectScalarS32True_module
ENTRY %SelectScalarS32True.v4 () -> s32[] {
%constant.2 = pred[] constant(true)
%constant.1 = s32[] constant(-42), sharding={replicated}
%constant = s32[] constant(42)
%select = s32[] select(pred[] %constant.2, s32[] %constant.1, s32[] %constant)
}
)";
auto result = ParseAndReturnVerifiedModule(original);
TF_EXPECT_OK(result.status());
}
TEST_F(HloParserTest, ConfigurationField) {
const std::string original = R"(HloModule AModule
ENTRY %configuration_test() -> s32[] {
%constant = s32[] constant(42), backend_config="foo bar"
})";
auto result = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(result.status());
EXPECT_EQ("foo bar", result.value()
->entry_computation()
->root_instruction()
->raw_backend_config_string());
}
TEST_F(HloParserTest, LiteralDimensionsError) {
const std::string original = R"(HloModule some_2x3_module
ENTRY %some_2x3 () -> f32[2,3] {
ROOT %constant = f32[2,3]{1,0} constant(}{1, 2, 3}, {4, 5, 6}})
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
ExpectHasSubstr(result.status().message(), "unexpected '}' token");
}
TEST_F(HloParserTest, LiteralDimensionsMismatch_1) {
const std::string original = R"(HloModule some_2_module
ENTRY %some_2 () -> f32[2] {
ROOT %constant = f32[2]{0} constant({1,{2}})
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
ExpectHasSubstr(result.status().message(),
"expects nested array in rank 1, but sees larger");
}
TEST_F(HloParserTest, LiteralDimensionsMismatch_2) {
const std::string original = R"(HloModule some_2x3_module
ENTRY %some_2x3 () -> f32[2,3] {
ROOT %constant = f32[2,3]{1,0} constant({1, 2, 3, 4, 5, 6})
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
ExpectHasSubstr(result.status().message(),
"expects nested array in rank 2, but sees 1");
}
TEST_F(HloParserTest, LiteralDimensionsMismatch_3) {
const std::string original = R"(HloModule some_2x3x2_module
ENTRY %some_2x3x2 () -> f32[2,3,2] {
ROOT %constant = f32[2,3,2]{2,1,0} constant({{{1, 2}, {3, 4}, {5, 6}, {7, 8}, {9, 10}, {11, 12}}})
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
ExpectHasSubstr(result.status().message(),
"expects 3 elements in the [0]th element");
}
TEST_F(HloParserTest, ConstantF16Overflow) {
const std::string original =
R"(HloModule ConstantF16Overflow_module
ENTRY %ConstantF16Overflow.v4 () -> f16[] {
ROOT %constant = f16[] constant(-65520)
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
ExpectHasSubstr(result.status().message(),
"is out of range for literal's primitive type F16");
}
TEST_F(HloParserTest, ConstantBf16NoOverflow) {
const std::string original = R"(
HloModule test_module
ENTRY test {
ROOT c = bf16[] constant(-65505)
})";
EXPECT_EQ(absl::OkStatus(), ParseAndReturnVerifiedModule(original).status());
}
TEST_F(HloParserTest, ConstantBf16Overflow) {
const std::string original = R"(
HloModule test_module
ENTRY test {
ROOT c = bf16[] constant(1e100)
})";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"out of range");
}
TEST_F(HloParserTest, ConstantU4Underflow) {
const std::string original = R"(
HloModule ConstantU4Underflow_module
ENTRY %ConstantU4Underflow () -> u4[] {
ROOT %constant = u4[] constant(-1)
})";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
ExpectHasSubstr(result.status().message(),
"is out of range for literal's primitive type U4");
}
TEST_F(HloParserTest, ConstantU4Overflow) {
const std::string original = R"(
HloModule ConstantU4Overflow_module
ENTRY %ConstantU4Overflow () -> u4[] {
ROOT %constant = u4[] constant(16)
})";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
ExpectHasSubstr(result.status().message(),
"is out of range for literal's primitive type U4");
}
TEST_F(HloParserTest, ConstantS4Underflow) {
const std::string original = R"(
HloModule ConstantS4Underflow_module
ENTRY %ConstantS4Underflow () -> s4[] {
ROOT %constant = s4[] constant(-9)
})";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
ExpectHasSubstr(result.status().message(),
"is out of range for literal's primitive type S4");
}
TEST_F(HloParserTest, ConstantS4Overflow) {
const std::string original = R"(
HloModule ConstantS4Overflow_module
ENTRY %ConstantS4Overflow () -> s4[] {
ROOT %constant = s4[] constant(8)
})";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
ExpectHasSubstr(result.status().message(),
"is out of range for literal's primitive type S4");
}
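// Unlike the narrow integer types above, a negative literal for u64 is
// accepted here, so only successful parsing is checked.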
TEST_F(HloParserTest, ConstantUnsignedUnderflow) {
const std::string original = R"(
HloModule ConstantUnsignedUnderflow_module
ENTRY %ConstantUnsignedUnderflow () -> u64[] {
ROOT %constant = u64[] constant(-1)
})";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_EQ(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, ConstantUnsignedOverflow) {
const std::string original = R"(
HloModule ConstantUnsignedOverflow_module
ENTRY %ConstantUnsignedOverflow () -> u32[] {
ROOT %constant = u32[] constant(4294967296)
})";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
ExpectHasSubstr(result.status().message(),
"is out of range for literal's primitive type U32");
}
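// 9223372036854775808 (2^63) does not fit in a signed 64-bit integer but does
// fit in u64, so parsing is expected to succeed.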
TEST_F(HloParserTest, ConstantUnsignedInt64Overflow) {
const std::string original = R"(
HloModule ConstantUnsignedOverflow_module
ENTRY %ConstantUnsignedOverflow () -> u64[] {
ROOT %constant = u64[] constant(9223372036854775808)
})";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_EQ(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, ConstantC64Overflow) {
const std::string original = R"(
HloModule test_module
ENTRY test () -> c64[] {
ROOT c = c64[] constant((1e100, 0))
})";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, ConstantC64Underflow) {
const std::string original = R"(
HloModule test_module
ENTRY test () -> c64[] {
ROOT c = c64[] constant((0, -1e100))
})";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, ConstantF64Overflow) {
const std::string original = R"(
HloModule test_module
ENTRY test {
ROOT c = f64[] constant(1.8e308)
})";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, ConstantF64Underflow) {
const std::string original = R"(
HloModule test_module
ENTRY test {
ROOT c = f64[] constant(-1.8e308)
})";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, ConstantWithExp) {
const std::string original = R"(HloModule ConstantWithExp_module
ENTRY %ConstantWithExp.v4 () -> f32[] {
%constant.1 = f32[] constant(3e+2)
}
)";
auto result = ParseAndReturnVerifiedModule(original);
TF_EXPECT_OK(result.status());
}
TEST_F(HloParserTest, ShortConstant) {
const std::string original =
R"(HloModule ShortConstant_module, entry_computation_layout={()->f32[67,89]{1,0}}
ENTRY %ShortConstant.v4 () -> f32[67,89] {
ROOT %constant.1 = f32[67,89]{1,0} constant({...})
}
)";
auto result = ParseAndReturnVerifiedModule(original);
TF_EXPECT_OK(result.status());
EXPECT_EQ(result.value()->ToString(HloPrintOptions()), original);
}
TEST_F(HloParserTest, NegativeNan) {
const std::string original =
R"(HloModule NegativeNan_module, entry_computation_layout={()->bf16[2]{0}}
ENTRY %NegativeNan () -> bf16[2] {
ROOT %constant = bf16[2]{0} constant({-nan, -nan})
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_EQ(absl::OkStatus(), result.status());
EXPECT_EQ(result.value()->ToString(HloPrintOptions()), original);
}
TEST_F(HloParserTest, NanPayload) {
const std::string original =
R"(HloModule NanPayload_module, entry_computation_layout={()->bf16[2]{0}}
ENTRY %NanPayload () -> bf16[2] {
ROOT %constant = bf16[2]{0} constant({-nan(0x7f), -nan(0x3f)})
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_EQ(absl::OkStatus(), result.status());
EXPECT_EQ(result.value()->ToString(HloPrintOptions()), original);
}
TEST_F(HloParserTest, InvalidNanPayloadBf16) {
const std::string original =
R"(HloModule InvalidNanPayloadBf16_module, entry_computation_layout={()->bf16[1]{0}}
ENTRY %NanPayload () -> bf16[1] {
ROOT %constant = bf16[1]{0} constant({nan(0x3ff)})
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"tries to set NaN payload 0x3ff");
}
TEST_F(HloParserTest, InvalidNanPayloadF8e4m3fn) {
const std::string original =
R"(HloModule InvalidNanPayloadF8e4m3fn_module, entry_computation_layout={()->f8e4m3fn[1]{0}}
ENTRY %NanPayload () -> f8e4m3fn[1] {
ROOT %constant = f8e4m3fn[1]{0} constant({nan(0x1)})
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"tries to set NaN payload 0x1");
}
TEST_F(HloParserTest, InvalidNanPayloadF8e4m3b11fnuz) {
const std::string original =
R"(HloModule InvalidNanPayloadF8e4m3b11fnuz_module, entry_computation_layout={()->f8e4m3b11fnuz[1]{0}}
ENTRY %NanPayload () -> f8e4m3b11fnuz[1] {
ROOT %constant = f8e4m3b11fnuz[1]{0} constant({nan(0x1)})
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"tries to set NaN payload 0x1");
}
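// Instruction attribute handling: attributes may be written in any order,
// while malformed convolution dim_labels, unexpected attributes, and missing
// required attributes should produce descriptive parse errors.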
TEST_F(HloParserTest, AttributesAnyOrder) {
const std::string original = R"(HloModule any_order_module
ENTRY %Convolve1D1Window_0.v3 (input: f32[1,2,1], filter: f32[1,1,1]) -> f32[1,4,1] {
%input = f32[1,2,1]{2,1,0} parameter(0)
%copy = f32[1,2,1]{2,0,1} copy(f32[1,2,1]{2,1,0} %input)
%filter = f32[1,1,1]{2,1,0} parameter(1)
ROOT %convolution = f32[1,4,1]{2,0,1} convolution(f32[1,2,1]{2,0,1} %copy, f32[1,1,1]{2,1,0} %filter), feature_group_count=1, sharding={maximal device=1}, backend_config="foo", dim_labels=b0f_0io->b0f, window={pad=1_1 size=1}
}
)";
TF_EXPECT_OK(ParseAndReturnVerifiedModule(original).status());
}
TEST_F(HloParserTest, InvalidDimLabels) {
std::string prefix = R"(HloModule invalid_dim_labels_module
ENTRY %Convolve1D1Window_0.v3 (input: f32[1,2,1], filter: f32[1,1,1]) -> f32[1,2,1] {
%input = f32[1,2,1]{2,1,0} parameter(0)
%copy = f32[1,2,1]{2,0,1} copy(f32[1,2,1]{2,1,0} %input)
%filter = f32[1,1,1]{2,1,0} parameter(1)
ROOT %convolution = f32[1,2,1]{2,0,1} convolution(f32[1,2,1]{2,0,1} %copy, f32[1,1,1]{2,1,0} %filter), window={size=1} )";
std::string suffix = R"(
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(
absl::StrCat(prefix, ",dim_labels=00_01->10", suffix))
.status()
.message(),
"expects unique");
ExpectHasSubstr(ParseAndReturnUnverifiedModule(
absl::StrCat(prefix, ",dim_labels=012_0123->210", suffix))
.status()
.message(),
"must have same number of spatial dimensions");
ExpectHasSubstr(ParseAndReturnUnverifiedModule(
absl::StrCat(prefix, ",dim_labels=013_0123->210", suffix))
.status()
.message(),
"expects [0-2bf?]");
}
TEST_F(HloParserTest, UnexpectedAttribute) {
const std::string original = R"(HloModule unexpected_attr_module
ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> f32[] {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15
%recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15
ROOT %constant = f32[] constant(2.1)
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0), channel_id=16, calls=%recv
%send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"unexpected attribute \"calls\"");
}
TEST_F(HloParserTest, MissingAttribute) {
const std::string original = R"(HloModule missing_attr_module
ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> f32[] {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15
%recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15
ROOT %constant = f32[] constant(-2.1)
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0)
%send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"attribute channel_id is expected but not seen");
}
TEST_F(HloParserTest, PredecessorUndefined) {
const std::string original = R"(HloModule pre_not_found_module
ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> f32[] {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15
%recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15
ROOT %constant = f32[] constant(2.1)
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0), channel_id=16, control-predecessors={%done}
%send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"'done' is not defined");
}
TEST_F(HloParserTest, SliceAllowOmitStride1) {
const std::string original = R"(HloModule slice_module
ENTRY %slice.v2 (p0: f32[3,3,4,4]) -> f32[3,3,2,4] {
%p0 = f32[3,3,4,4]{3,2,1,0} parameter(0)
ROOT %slice = f32[3,3,2,4]{3,2,1,0} slice(f32[3,3,4,4]{3,2,1,0} %p0), slice={[0:3], [0:3], [0:4:2], [0:4]}
}
)";
TF_EXPECT_OK(ParseAndReturnVerifiedModule(original).status());
}
TEST_F(HloParserTest, PaddingConfigIsNotWindowPad) {
const std::string original = R"(HloModule window_pad_module
ENTRY %Convolve1D1Window_0.v3 (input: f32[1,2,1], filter: f32[1,1,1]) -> f32[1,2,1] {
%input = f32[1,2,1]{2,1,0} parameter(0)
%copy = f32[1,2,1]{2,0,1} copy(f32[1,2,1]{2,1,0} %input)
%filter = f32[1,1,1]{2,1,0} parameter(1)
ROOT %convolution = f32[1,2,1]{2,0,1} convolution(f32[1,2,1]{2,0,1} %copy, f32[1,1,1]{2,1,0} %filter), dim_labels=b0f_0io->b0f, window={pad=1_1_0 size=1}
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"expects padding_low and padding_high separated by '_'");
}
TEST_F(HloParserTest, CommaBetweenSubAttributes) {
const std::string original = R"(HloModule test_comma_module
ENTRY %test_comma.v4 () -> f32[] {
ROOT %constant = f32[] constant(-4.2), metadata={source_line=5, op_type="::const"}
}
)";
TF_EXPECT_OK(ParseAndReturnVerifiedModule(original).status());
}
TEST_F(HloParserTest, ComputationShapeDoesNotMatchRootShape) {
const std::string original = R"(HloModule custom_call:
ENTRY %CustomCall () -> f32[1] {
%constant = f32[1]{0} constant({12345})
ROOT %foo = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar"
})";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"Shape of computation CustomCall, f32[1], is not compatible "
"with that of its root instruction foo, f32[1,2,3]");
}
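// Entry computation layout handling: the tests below check when layouts are
// filled in from defaults and how HloParserOptions::set_fill_missing_layouts
// controls filling of missing parameter, result, and instruction layouts.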
TEST_F(HloParserTest, EntryComputationLayoutNotDefined) {
const std::string original = R"(
HloModule layout_not_defined
add_F32.v3 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %Reduce (input: f32[8,16,256]) -> f32[8,16] {
input = f32[8,16,256]{0,1,2} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[8,16]{0,1} reduce(input, constant), dimensions={2}, to_apply=add_F32.v3
})";
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
auto program_layout = module.value()->entry_computation_layout();
ASSERT_EQ(program_layout.parameter_count(), 1);
auto param_layout = program_layout.parameter_layout(0).layout();
auto result_layout = program_layout.result_layout().layout();
EXPECT_TRUE(
LayoutUtil::Equal(LayoutUtil::MakeLayout({0, 1, 2}), param_layout))
<< "actual layout of parameter(0) is "
<< LayoutUtil::HumanString(param_layout);
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0, 1}), result_layout))
<< "actual layout of result is "
<< LayoutUtil::HumanString(result_layout);
}
TEST_F(HloParserTest, EntryComputationLayoutDefined) {
const std::string original = R"(
HloModule layout_defined, entry_computation_layout={(f32[8,16,256]) -> f32[8,16]}
add_F32.v3 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %Reduce (input: f32[8,16,256]) -> f32[8,16] {
input = f32[8,16,256]{0,1,2} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[8,16]{0,1} reduce(input, constant), dimensions={2}, to_apply=add_F32.v3
})";
absl::StatusOr<std::unique_ptr<HloModule>> module =
ParseAndReturnUnverifiedModule(
original, {}, HloParserOptions().set_fill_missing_layouts(false));
TF_ASSERT_OK(module.status());
EXPECT_FALSE(module.value()->entry_computation_layout().AnyLayoutSet());
}
TEST_F(HloParserTest, DoNotSetEntryComputationLayoutIfSet) {
const std::string original = R"(
HloModule layout_defined, entry_computation_layout={(f32[8,16,256]{1,2,0}) -> f32[8,16]}
add_F32.v3 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %Reduce (input: f32[8,16,256]) -> f32[8,16] {
input = f32[8,16,256]{0,1,2} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[8,16]{0,1} reduce(input, constant), dimensions={2}, to_apply=add_F32.v3
})";
absl::StatusOr<std::unique_ptr<HloModule>> module =
ParseAndReturnUnverifiedModule(
original, {}, HloParserOptions().set_fill_missing_layouts(true));
TF_ASSERT_OK(module.status());
EXPECT_THAT(module.value()
->entry_computation_layout()
.parameter_layout(0)
.layout()
.minor_to_major(),
ElementsAre(1, 2, 0));
}
TEST_F(HloParserTest, SetEntryComputationLayoutIfNotSet) {
const std::string original = R"(
HloModule layout_defined, entry_computation_layout={(f32[8,16,256]) -> f32[8,16]}
add_F32.v3 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %Reduce (input: f32[8,16,256]) -> f32[8,16] {
input = f32[8,16,256]{0,1,2} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[8,16]{0,1} reduce(input, constant), dimensions={2}, to_apply=add_F32.v3
})";
absl::StatusOr<std::unique_ptr<HloModule>> module =
ParseAndReturnUnverifiedModule(
original, {}, HloParserOptions().set_fill_missing_layouts(true));
TF_ASSERT_OK(module.status());
EXPECT_THAT(module.value()
->entry_computation_layout()
.parameter_layout(0)
.layout()
.minor_to_major(),
ElementsAre(2, 1, 0));
}
TEST_F(HloParserTest, DoNotFallBackToDefaultLayoutIfDisabled) {
const std::string original = R"(
HloModule t
ENTRY main {
p0 = f16[16,32,48,64]{3,2,1,0} parameter(0)
p1 = f16[80,64,48,32]{3,2,1,0} parameter(1)
ROOT dot = f16[64,32,16,80] dot(p0, p1), lhs_contracting_dims={2}, rhs_contracting_dims={2}, lhs_batch_dims={3,1}, rhs_batch_dims={1,3}
})";
absl::StatusOr<std::unique_ptr<HloModule>> module =
ParseAndReturnUnverifiedModule(
original, {}, HloParserOptions().set_fill_missing_layouts(false));
TF_ASSERT_OK(module.status());
EXPECT_FALSE(module.value()
->entry_computation()
->root_instruction()
->shape()
.has_layout());
}
TEST_F(HloParserTest, FallBackToDefaultLayoutIfEnabled) {
const std::string original = R"(
HloModule t
ENTRY main {
p0 = f16[16,32,48,64]{3,2,1,0} parameter(0)
p1 = f16[80,64,48,32]{3,2,1,0} parameter(1)
ROOT dot = f16[64,32,16,80] dot(p0, p1), lhs_contracting_dims={2}, rhs_contracting_dims={2}, lhs_batch_dims={3,1}, rhs_batch_dims={1,3}
})";
absl::StatusOr<std::unique_ptr<HloModule>> module =
ParseAndReturnUnverifiedModule(
original, {}, HloParserOptions().set_fill_missing_layouts(true));
TF_ASSERT_OK(module.status());
EXPECT_THAT(module.value()
->entry_computation()
->root_instruction()
->shape()
.layout()
.minor_to_major(),
ElementsAre(3, 2, 1, 0));
}
TEST_F(HloParserTest, FallBackToDefaultLayoutIfAlreadySet) {
const std::string original = R"(
HloModule t
ENTRY main {
p0 = f16[16,32,48,64]{3,2,1,0} parameter(0)
p1 = f16[80,64,48,32]{3,2,1,0} parameter(1)
ROOT dot = f16[64,32,16,80]{1,2,0,3} dot(p0, p1), lhs_contracting_dims={2}, rhs_contracting_dims={2}, lhs_batch_dims={3,1}, rhs_batch_dims={1,3}
})";
absl::StatusOr<std::unique_ptr<HloModule>> module =
ParseAndReturnUnverifiedModule(
original, {}, HloParserOptions().set_fill_missing_layouts(true));
TF_ASSERT_OK(module.status());
EXPECT_THAT(module.value()
->entry_computation()
->root_instruction()
->shape()
.layout()
.minor_to_major(),
ElementsAre(1, 2, 0, 3));
}
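// Entry and root selection plus comment handling: without an explicit ENTRY
// the last computation is used, without an explicit ROOT the last instruction
// is used, and both /* */ and // comment styles (including MS-DOS and Mac
// line endings) must be tolerated.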
TEST_F(HloParserTest, NoEntry) {
const std::string original = R"(HloModule no_entry:
c1 {
const1 = f32[1]{0} constant({12345})
}
c2 {
const2 = f32[1]{0} constant({67890})
})";
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
EXPECT_EQ(module.value()->entry_computation()->name(), "c2");
}
TEST_F(HloParserTest, NoRoot) {
const std::string original = R"(HloModule no_root:
ENTRY consts {
first = f32[1]{0} constant({12345})
last = f32[1]{0} constant({67890})
})";
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
EXPECT_EQ(module.value()->entry_computation()->root_instruction()->name(),
"last");
}
TEST_F(HloParserTest, Comments) {
const std::string original = R"(
HloModule comments:
ENTRY c1 {
ROOT const1 = f32[1]{0} constant({12345 })
}
)";
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
}
TEST_F(HloParserTest, MultilineComments) {
const std::string original = R"(HloModule multiline_comment:
ENTRY c1 {
ROOT const1 = f32[1]{0} constant({12345})
})";
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
}
TEST_F(HloParserTest, UnterminatedComment) {
const std::string original = R"(HloModule unterminated_comment:
ENTRY c1 {
/* unterminated
ROOT const1 = f32[1]{0} constant({12345})
})";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"/* unterminated\n^");
}
TEST_F(HloParserTest, SlashSlashComments) {
const std::string original = R"(HloModule slash_slash_comment:
ENTRY c1 {
ROOT const1 = f32[1]{0} constant({12345})
})";
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
}
TEST_F(HloParserTest, SlashSlashCommentMsDosEolFormat) {
const std::string original =
"HloModule slash_slash_comment:\r\n
"bar\r\nROOT const1 = f32[1]{0} constant({12345})
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
}
TEST_F(HloParserTest, SlashSlashCommentMacEolFormat) {
const std::string original =
"HloModule slash_slash_comment:\r
"bar\rROOT const1 = f32[1]{0} constant({12345})
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
}
TEST_F(HloParserTest, MultipleEntries) {
const std::string original = R"(HloModule multiple_entries:
ENTRY c1 {
const1 = f32[1]{0} constant({12345})
}
ENTRY c2 {
const2 = f32[1]{0} constant({67890})
})";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"expects only one ENTRY");
}
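// input_output_alias parsing: valid aliasing descriptions (including
// must-alias and nested shape indices) round-trip into the module's
// input_output_alias_config(), while malformed descriptions report errors.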
TEST_F(HloParserTest, SimpleAliasing) {
const std::string original = R"(
HloModule Module, input_output_alias={ {0}: (0, {0}, must-alias), {1}: (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
std::unique_ptr<HloModule> parsed_module = std::move(module).value();
EXPECT_EQ(parsed_module->input_output_alias_config().GetAliasedOutput(0, {0}),
ShapeIndex{0});
EXPECT_TRUE(
parsed_module->input_output_alias_config().ParameterMustAlias(0, {0}));
EXPECT_EQ(parsed_module->input_output_alias_config().GetAliasedOutput(0, {1}),
ShapeIndex{1});
EXPECT_FALSE(
parsed_module->input_output_alias_config().ParameterMustAlias(0, {1}));
}
TEST_F(HloParserTest, NestedAliasing) {
const std::string original = R"(
HloModule Module, input_output_alias={ {0, 0}: (0, {0}), {1, 1}: (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
%t0 = (f32[], f32[]) tuple(%p0, %p1)
%t1 = (f32[], f32[]) tuple(%p0, %p1)
ROOT %out = ((f32[], f32[]), (f32[], f32[])) tuple(%t0, %t1)
}
)";
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
std::unique_ptr<HloModule> parsed_module = std::move(module).value();
EXPECT_EQ(parsed_module->input_output_alias_config().GetAliasedOutput(0, {0}),
ShapeIndex({0, 0}));
EXPECT_EQ(parsed_module->input_output_alias_config().GetAliasedOutput(0, {1}),
ShapeIndex({1, 1}));
}
TEST_F(HloParserTest, AliasingWrongIndex) {
const std::string original = R"(
HloModule Module, input_output_alias={ {0 : (0, {0}), {1}: (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"Expects '}' at the end of ShapeIndex");
}
TEST_F(HloParserTest, AliasingShapeIndexNotNumerical) {
const std::string original = R"(
HloModule Module, input_output_alias={ {0, a}: (0, {0}), {1}: (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"expects integer");
}
TEST_F(HloParserTest, AliasingWrongFormatNoColon) {
const std::string original = R"(
HloModule Module, input_output_alias={ {0, 0}: (0, {0}), (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"Expects '{' at the start of ShapeIndex");
}
TEST_F(HloParserTest, AliasingWrongFormatTwoColons) {
const std::string original = R"(
HloModule Module, input_output_alias={ {0}: (0, {0}): {0, 1}, {1}: (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"Expects '}' at the end of aliasing description");
}
TEST_F(HloParserTest, AliasingWrongFormatAlphaParam) {
const std::string original = R"(
HloModule Module, input_output_alias={ {0, a}: (zero, {0}), {1}: (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"expects integer");
}
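// buffer_donor parsing: valid donor lists are reflected in the module's
// buffer_donor_config(), and malformed entries report errors.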
TEST_F(HloParserTest, SimpleBufferDonor) {
const std::string original = R"(
HloModule Module, buffer_donor={ (0, {0}), (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
std::unique_ptr<HloModule> parsed_module = std::move(module).value();
EXPECT_TRUE(
parsed_module->buffer_donor_config().ParameterIsBufferDonor(0, {0}));
EXPECT_TRUE(
parsed_module->buffer_donor_config().ParameterIsBufferDonor(0, {1}));
EXPECT_FALSE(
parsed_module->buffer_donor_config().ParameterIsBufferDonor(0, {}));
}
TEST_F(HloParserTest, BufferDonorShapeIndexNotNumerical) {
const std::string original = R"(
HloModule Module, buffer_donor={ (0, {0, a}), (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"expects integer");
}
TEST_F(HloParserTest, BufferDonorWrongFormatAlphaParam) {
const std::string original = R"(
HloModule Module, buffer_donor={ (zero, {0}), (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"expects integer");
}
TEST_F(HloParserTest, MultipleRoots) {
const std::string original = R"(HloModule multiple_roots:
ENTRY consts {
ROOT const1 = f32[1]{0} constant({12345})
ROOT const2 = f32[1]{0} constant({12345})
})";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"one computation should have only one ROOT");
}
TEST_F(HloParserTest, ComputationExists) {
const std::string original = R"(HloModule comp_exists
comp {
const1 = f32[1]{0} constant({12345})
}
comp {
const2 = f32[1]{0} constant({67890})
})";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
R"(was parsing 2:1: error: computation previously defined here
comp {
^)");
}
TEST_F(HloParserTest, CrossComputationLookup) {
const std::string original = R"(HloModule cross_computation_lookup:
tcalla (a: (s32[], s32[])) -> (s32[], s32[]) {
ROOT aparam = (s32[], s32[]) parameter(0)
}
tcallb (b: (s32[], s32[])) -> s32[] {
rparam = (s32[], s32[]) parameter(0)
ROOT gte0 = s32[] get-tuple-element(aparam), index=0
}
ENTRY entry {
param = (s32[], s32[]) parameter(0)
call0 = (s32[], s32[]) call(param), to_apply=tcalla
ROOT call1 = s32[] call(param), to_apply=tcallb
})";
ExpectHasSubstr(
ParseAndReturnUnverifiedModule(original).status().message(),
"was parsing 8:39: error: instruction does not exist: aparam");
}
TEST_F(HloParserTest, SameNameDiffComputations) {
const std::string original = R"(HloModule same_names:
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT result = f32[] add(p0, p1)
}
ENTRY ReduceR3ToR2 {
p0 = f32[8,16,256]{2,1,0} parameter(0)
p1 = f32[] constant(0)
ROOT result = f32[8,16]{1,0} reduce(p0, p1), dimensions={2}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(original));
ASSERT_NE(module->entry_computation(), nullptr);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Reduce()));
}
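// Standalone sharding parsing: ParseSharding should round-trip tiled,
// partially replicated, iota-based, shard_as/shard_like, and unknown
// shardings through HloSharding::ToString.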
TEST_F(HloParserTest, ParseSharding) {
const std::string original = "{maximal device=42}";
TF_ASSERT_OK_AND_ASSIGN(HloSharding sharding, ParseSharding(original));
EXPECT_EQ(sharding.ToString(), original);
}
TEST_F(HloParserTest, ParseShardingPartialReplication) {
const std::string original = "{devices=[2,2]0,1,2,3 last_tile_dim_replicate}";
TF_ASSERT_OK_AND_ASSIGN(HloSharding sharding, ParseSharding(original));
EXPECT_EQ(sharding.ToString(), original);
Array<int64_t> tiling_last_dim_replicated({{0, 1}, {2, 3}});
EXPECT_EQ(HloSharding::PartialTile(tiling_last_dim_replicated).ToString(),
original);
}
TEST_F(HloParserTest, ParseShardingSubGroup) {
const std::string original =
"{devices=[2,2,2,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 "
"last_tile_dims={manual, replicated}}";
TF_ASSERT_OK_AND_ASSIGN(HloSharding sharding, ParseSharding(original));
EXPECT_EQ(sharding.ToString(), original);
Array<int64_t> tile_assignment({2, 2, 2, 2});
tile_assignment.FillIota(0);
std::vector<OpSharding::Type> subgroup_types = {OpSharding::MANUAL,
OpSharding::REPLICATED};
EXPECT_EQ(HloSharding::Subgroup(tile_assignment, subgroup_types).ToString(),
original);
}
TEST_F(HloParserTest, ParseTrivialIotaShardingPartialReplication) {
const std::string original = "{devices=[2,2]<=[4] last_tile_dim_replicate}";
TF_ASSERT_OK_AND_ASSIGN(HloSharding sharding, ParseSharding(original));
EXPECT_EQ(sharding.ToString(), original);
TileAssignment tiling_last_dim_replicated((absl::Span<const int64_t>){2, 2});
EXPECT_EQ(HloSharding::PartialTile(tiling_last_dim_replicated).ToString(),
original);
}
TEST_F(HloParserTest, ParseTrivialIotaShardingSubGroup) {
const std::string original =
"{devices=[2,2,2,2]<=[16] last_tile_dims={manual, replicated}}";
TF_ASSERT_OK_AND_ASSIGN(HloSharding sharding, ParseSharding(original));
EXPECT_EQ(sharding.ToString(), original);
TileAssignment tile_assignment({2, 2, 2, 2});
std::vector<OpSharding::Type> subgroup_types = {OpSharding::MANUAL,
OpSharding::REPLICATED};
EXPECT_EQ(HloSharding::Subgroup(tile_assignment, subgroup_types).ToString(),
original);
}
TEST_F(HloParserTest, ParseTransposedIotaShardingPartialReplication) {
const std::string original =
"{devices=[2,2]<=[2,2]T(1,0) last_tile_dim_replicate}";
TF_ASSERT_OK_AND_ASSIGN(HloSharding sharding, ParseSharding(original));
EXPECT_EQ(sharding.ToString(), original);
TileAssignment tiling_last_dim_replicated({2, 2}, {2, 2}, {1, 0});
EXPECT_EQ(HloSharding::PartialTile(tiling_last_dim_replicated).ToString(),
original);
}
TEST_F(HloParserTest, ParseTransposedIotaShardingSubGroup) {
const std::string original =
"{devices=[2,2,2,2]<=[2,2,4]T(2,1,0) last_tile_dims={manual, "
"replicated}}";
TF_ASSERT_OK_AND_ASSIGN(HloSharding sharding, ParseSharding(original));
EXPECT_EQ(sharding.ToString(), original);
TileAssignment tile_assignment({2, 2, 2, 2}, {2, 2, 4}, {2, 1, 0});
std::vector<OpSharding::Type> subgroup_types = {OpSharding::MANUAL,
OpSharding::REPLICATED};
EXPECT_EQ(HloSharding::Subgroup(tile_assignment, subgroup_types).ToString(),
original);
}
TEST_F(HloParserTest, ParseShardAs) {
const std::string original = "{manual shard_as 1}";
TF_ASSERT_OK_AND_ASSIGN(HloSharding sharding, ParseSharding(original));
EXPECT_EQ(sharding.ToString(), original);
EXPECT_EQ(
HloSharding::Manual().SetShardGroup(HloSharding::ShardAs(1)).ToString(),
original);
}
TEST_F(HloParserTest, ParseShardLike) {
const std::string original =
"{devices=[2,2,2,2]<=[16] last_tile_dims={manual, replicated} shard_like "
"1}";
TF_ASSERT_OK_AND_ASSIGN(HloSharding sharding, ParseSharding(original));
EXPECT_EQ(sharding.ToString(), original);
TileAssignment tile_assignment({2, 2, 2, 2});
std::vector<OpSharding::Type> subgroup_types = {OpSharding::MANUAL,
OpSharding::REPLICATED};
EXPECT_EQ(HloSharding::Subgroup(tile_assignment, subgroup_types)
.SetShardGroup(HloSharding::ShardLike(1))
.ToString(),
original);
}
TEST_F(HloParserTest, ParseUnknownSharding) {
const std::string original = "{unknown}";
TF_ASSERT_OK_AND_ASSIGN(HloSharding sharding, ParseSharding(original));
EXPECT_EQ(sharding.ToString(), original);
EXPECT_EQ(HloSharding::Unknown().ToString(), original);
}
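// Standalone parsers for other attribute strings: frontend attributes,
// windows, convolution dimension numbers, replica groups, and padding
// configs should round-trip through their ToString counterparts.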
TEST_F(HloParserTest, ParseFrontendAttributes) {
const std::string original =
R"({attr_a="test_a",attr_b="b",attr_c="s64",attr_d="a/b"})";
TF_ASSERT_OK_AND_ASSIGN(FrontendAttributes frontend_attributes,
ParseFrontendAttributes(original));
EXPECT_EQ(FrontendAttributesToString(frontend_attributes), original);
}
TEST_F(HloParserTest, ParseWindow) {
Window original = window_util::MakeWindow({1, 2, 3});
TF_ASSERT_OK_AND_ASSIGN(Window parsed,
                          ParseWindow(window_util::ToString(original)));
EXPECT_EQ(window_util::ToString(original), window_util::ToString(parsed));
}
TEST_F(HloParserTest, ParseConvolutionDimensionNumbers) {
const std::string original = "b0f_0io->b0f";
TF_ASSERT_OK_AND_ASSIGN(ConvolutionDimensionNumbers dnums,
ParseConvolutionDimensionNumbers(original));
EXPECT_EQ(original, ConvolutionDimensionNumbersToString(dnums));
}
TEST_F(HloParserTest, ParseConvolutionDimensionNumbersWithUnknownDims) {
const std::string original = "b0?f_?0?io->?b?0?f";
TF_ASSERT_OK_AND_ASSIGN(ConvolutionDimensionNumbers dnums,
ParseConvolutionDimensionNumbers(original));
EXPECT_EQ(original, ConvolutionDimensionNumbersToString(dnums));
}
TEST_F(HloParserTest, ParseReplicaGroups) {
const std::string original = "{{0,1},{2,3}}";
TF_ASSERT_OK_AND_ASSIGN(std::vector<ReplicaGroup> replica_groups,
ParseReplicaGroupsOnly(original));
EXPECT_EQ(original, ReplicaGroupsToString(replica_groups));
}
TEST_F(HloParserTest, ParsePaddingConfigNoInteriorPadding) {
const std::string original = "0_1x2_3";
TF_ASSERT_OK_AND_ASSIGN(PaddingConfig dnums, ParsePaddingConfig(original));
EXPECT_EQ(original, PaddingConfigToString(dnums));
}
TEST_F(HloParserTest, ParsePaddingConfigInteriorPadding) {
const std::string original = "0_1_0x2_3_4";
TF_ASSERT_OK_AND_ASSIGN(PaddingConfig dnums, ParsePaddingConfig(original));
EXPECT_EQ(original, PaddingConfigToString(dnums));
}
TEST_F(HloParserTest, ParsePaddingConfigInteriorPaddingImplicitZeroDim) {
TF_ASSERT_OK_AND_ASSIGN(PaddingConfig dnums, ParsePaddingConfig("0_1x2_3_4"));
EXPECT_EQ("0_1_0x2_3_4", PaddingConfigToString(dnums));
}
TEST_F(HloParserTest, NontupleInfeed) {
const std::string original = R"(HloModule nontuple_infeed:
ENTRY nontuple_infeed {
token0 = token[] after-all()
ROOT infeed = pred[] infeed(token0)
})";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"infeed must have a non-empty tuple shape");
}
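// HloParserSingleOpTest covers parsing a single instruction (or a
// canonical-form snippet) without an enclosing HloModule; operands given
// only as shapes become parameters, and nested computations may be inlined.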
TEST(HloParserSingleOpTest, SingleOp) {
const std::string text =
"%multiply = f32[2,4]{1,0} multiply(f32[2,4]{1,0} %broadcast, "
"f32[2,4]{1,0} %x)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(text));
const HloComputation* computation = module->entry_computation();
ASSERT_NE(computation, nullptr);
EXPECT_THAT(computation->root_instruction(),
GmockMatch(m::Multiply(m::Parameter(0), m::Parameter(1))));
}
TEST(HloParserSingleOpTest, SingleOpNoShapeProducesError) {
const std::string text =
"multiply(f32[2,4]{1,0} %broadcast, f32[2,4]{1,0} %x)";
absl::StatusOr<std::unique_ptr<HloModule>> module =
ParseAndReturnUnverifiedModule(text);
ASSERT_TRUE(!module.status().ok());
LOG(INFO) << "Status: " << module.status();
EXPECT_THAT(module.status().ToString(),
HasSubstr("expects '=' in instruction"));
}
TEST(HloParserSingleOpTest, SingleOpNoOperandShapesProducesError) {
const std::string text = "%multiply = f32[2,4]{1,0} multiply(%broadcast, %x)";
absl::StatusOr<std::unique_ptr<HloModule>> module =
ParseAndReturnUnverifiedModule(text);
ASSERT_TRUE(!module.status().ok());
LOG(INFO) << "Status: " << module.status();
EXPECT_THAT(module.status().ToString(),
HasSubstr("Operand had no shape in HLO text"));
}
TEST(HloParserSingleOpTest, SingleOpNoNames) {
const std::string text =
"%multiply = f32[2,4]{1,0} multiply(f32[2,4]{1,0}, f32[2,4]{1,0})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(text));
const HloComputation* computation = module->entry_computation();
ASSERT_NE(computation, nullptr);
EXPECT_THAT(computation->root_instruction(),
GmockMatch(m::Multiply(m::Parameter(0), m::Parameter(1))));
}
TEST(HloParserSingleOpTest, CanonicalOp) {
const std::string text =
"f32[2,4]{1,0} multiply(f32[2,4]{1,0}, f32[2,4]{1,0})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(text));
const HloComputation* computation = module->entry_computation();
ASSERT_NE(computation, nullptr);
EXPECT_THAT(computation->root_instruction(),
GmockMatch(m::Multiply(m::Parameter(0), m::Parameter(1))));
EXPECT_EQ(
computation->root_instruction()->ToString(HloPrintOptions::Canonical()),
text);
}
TEST(HloParserSingleOpTest, CanonicalOpWithNested) {
const std::string text =
R"(f32[5,20]{1,0} while(f32[5,10]{1,0}), condition=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
ROOT tmp_2 = f32[5,20]{1,0} fusion(f32[5,10]{1,0} tmp_0, f32[20,10]{1,0} tmp_1), kind=kLoop, calls=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
tmp_2 = f32[10,20]{1,0} transpose(f32[20,10]{1,0} tmp_1), dimensions={1,0}
ROOT tmp_3 = f32[5,20]{1,0} dot(f32[5,10]{1,0} tmp_0, f32[10,20]{1,0} tmp_2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
}, body=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
ROOT tmp_2 = f32[5,20]{1,0} fusion(f32[5,10]{1,0} tmp_0, f32[20,10]{1,0} tmp_1), kind=kLoop, calls=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
tmp_2 = f32[10,20]{1,0} transpose(f32[20,10]{1,0} tmp_1), dimensions={1,0}
ROOT tmp_3 = f32[5,20]{1,0} dot(f32[5,10]{1,0} tmp_0, f32[10,20]{1,0} tmp_2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(text));
const HloComputation* computation = module->entry_computation();
ASSERT_NE(computation, nullptr);
EXPECT_EQ(
computation->root_instruction()->ToString(HloPrintOptions::Canonical()),
text);
}
TEST(HloParserSingleOpTest, CanonicalOpIndexedConditionalInlinedBranches) {
const std::string text =
R"(f32[5,10]{1,0} conditional(s32[], f32[5,10]{1,0}, f32[5,10]{1,0}, f32[5,10]{1,0}), branch_computations={
{
tmp_0 = f32[5,10]{1,0} parameter(0)
ROOT tmp_1 = f32[5,10]{1,0} ceil(f32[5,10]{1,0} tmp_0)
},
{
tmp_0 = f32[5,10]{1,0} parameter(0)
ROOT tmp_1 = f32[5,10]{1,0} floor(f32[5,10]{1,0} tmp_0)
},
{
tmp_0 = f32[5,10]{1,0} parameter(0)
ROOT tmp_1 = f32[5,10]{1,0} copy(f32[5,10]{1,0} tmp_0)
}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(text));
const HloComputation* computation = module->entry_computation();
ASSERT_NE(computation, nullptr);
EXPECT_EQ(
computation->root_instruction()->ToString(HloPrintOptions::Canonical()),
text);
}
TEST(HloParserSingleOpTest, SingleOpWithNested) {
const std::string text =
R"(%fusion = f32[3,2,1,1]{3,2,1,0} fusion(f32[3,2,1,1]{3,2,1,0} %p0, f32[2]{0} %p1), kind=kLoop, calls=
{
%param_0 = f32[3,2,1,1]{3,2,1,0} parameter(0)
%param_1 = f32[2]{0} parameter(1)
%broadcast = f32[3,2,1,1]{3,2,1,0} broadcast(f32[2]{0} %param_1), dimensions={1}
ROOT %subtract = f32[3,2,1,1]{3,2,1,0} subtract(f32[3,2,1,1]{3,2,1,0} %param_0, f32[3,2,1,1]{3,2,1,0} %broadcast)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(text));
const HloComputation* computation = module->entry_computation();
ASSERT_NE(computation, nullptr);
EXPECT_THAT(computation->root_instruction(),
GmockMatch(m::Op()
.WithOpcode(HloOpcode::kFusion)
.WithNumOperands(2)
.WithOperand(0, m::Parameter(0))
.WithOperand(1, m::Parameter(1))));
}
TEST(HloParserSingleOpTest, SingleOpWithNested_DoesNotExist) {
const std::string text =
R"(reduce = f32[] reduce(f32[10], f32[]), dimensions={1}, to_apply=
{
result = f32[] add(f32[] x, f32[] y)
})";
auto status = ParseAndReturnUnverifiedModule(text).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("does not exist: x"));
}
TEST(HloParserSingleOpTest, SingleOpWithNested_NoLhs) {
const std::string text =
R"(reduce = f32[] reduce(f32[10], f32[]), dimensions={1}, to_apply=
{
f32[] add(f32[] x, f32[] y)
})";
auto status = ParseAndReturnUnverifiedModule(text).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("expects name"));
}
TEST(HloParserSingleOpTest, SingleOpWithNested_NoOperandName) {
const std::string text =
R"(reduce = f32[] reduce(f32[10], f32[]), dimensions={1}, to_apply=
{
result = f32[] add(f32[], f32[])
})";
auto status = ParseAndReturnUnverifiedModule(text).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("expects name"));
}
TEST(HloParserSingleOpTest, ConvolutionTrivialFeatureGroupCount) {
const std::string text =
R"(%convolution = f32[1,2,1]{2,0,1} convolution(f32[1,2,1]{2,0,1} %copy, f32[1,1,1]{2,1,0} %filter), window={size=1}, dim_labels=b0f_0io->b0f)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(text));
const HloComputation* computation = module->entry_computation();
ASSERT_NE(computation, nullptr);
EXPECT_THAT(computation->root_instruction(),
GmockMatch(m::Convolution(m::Parameter(0), m::Parameter(1))));
auto* convolution =
Cast<HloConvolutionInstruction>(computation->root_instruction());
EXPECT_EQ(convolution->feature_group_count(), 1);
}
TEST(HloParserSingleOpTest, MultipleOpsProducesError) {
const std::string text = R"(
param = f32[2,5,1,3] parameter(0)
transpose = f32[1,5,2,3] transpose(param), dimensions={2,1,0,3}
)";
auto status = ParseAndReturnUnverifiedModule(text).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Expected eof"));
}
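// is_scheduled handling: only is_scheduled=true attaches a schedule to the
// parsed module, and the schedule's sequence follows the textual order of
// the instructions.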
TEST_F(HloParserTest, IsScheduledIsFalse) {
const std::string text = R"(
HloModule axpy_module, is_scheduled=false
ENTRY %axpy.v5 (alpha: f32[], x: f32[2,4], y: f32[2,4]) -> f32[2,4] {
%alpha = f32[] parameter(0)
%broadcast = f32[2,4]{1,0} broadcast(f32[] %alpha), dimensions={}
%x = f32[2,4]{1,0} parameter(1)
%multiply = f32[2,4]{1,0} multiply(f32[2,4]{1,0} %broadcast, f32[2,4]{1,0} %x)
%y = f32[2,4]{1,0} parameter(2)
ROOT %add = f32[2,4]{1,0} add(f32[2,4]{1,0} %multiply, f32[2,4]{1,0} %y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
ASSERT_FALSE(module->has_schedule());
}
TEST_F(HloParserTest, IsScheduledNotPresent) {
const std::string text = R"(
HloModule axpy_module
ENTRY %axpy.v5 (alpha: f32[], x: f32[2,4], y: f32[2,4]) -> f32[2,4] {
%alpha = f32[] parameter(0)
%broadcast = f32[2,4]{1,0} broadcast(f32[] %alpha), dimensions={}
%x = f32[2,4]{1,0} parameter(1)
%multiply = f32[2,4]{1,0} multiply(f32[2,4]{1,0} %broadcast, f32[2,4]{1,0} %x)
%y = f32[2,4]{1,0} parameter(2)
ROOT %add = f32[2,4]{1,0} add(f32[2,4]{1,0} %multiply, f32[2,4]{1,0} %y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
ASSERT_FALSE(module->has_schedule());
}
TEST_F(HloParserTest, IsScheduledIsTrue) {
const std::string text = R"(
HloModule axpy_module, is_scheduled=true
ENTRY %axpy.v5 (alpha: f32[], x: f32[2,4], y: f32[2,4]) -> f32[2,4] {
%alpha = f32[] parameter(0)
%broadcast = f32[2,4]{1,0} broadcast(f32[] %alpha), dimensions={}
%x = f32[2,4]{1,0} parameter(1)
%multiply = f32[2,4]{1,0} multiply(f32[2,4]{1,0} %broadcast, f32[2,4]{1,0} %x)
%y = f32[2,4]{1,0} parameter(2)
ROOT %add = f32[2,4]{1,0} add(f32[2,4]{1,0} %multiply, f32[2,4]{1,0} %y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
ASSERT_TRUE(module->has_schedule());
TF_ASSERT_OK(module->schedule().Verify());
EXPECT_EQ(module->schedule().sequences().size(), 1);
ASSERT_TRUE(
module->schedule().is_computation_scheduled(module->entry_computation()));
EXPECT_THAT(
module->schedule().sequence(module->entry_computation()).instructions(),
ElementsAre(GmockMatch(m::Parameter()), GmockMatch(m::Broadcast()),
GmockMatch(m::Parameter()), GmockMatch(m::Multiply()),
GmockMatch(m::Parameter()), GmockMatch(m::Add())));
}
TEST_F(HloParserTest, IsScheduledIsTrueDifferentOrder) {
const std::string text = R"(
HloModule axpy_module, is_scheduled=true
ENTRY %axpy.v5 (alpha: f32[], x: f32[2,4], y: f32[2,4]) -> f32[2,4] {
%alpha = f32[] parameter(0)
%x = f32[2,4]{1,0} parameter(1)
%y = f32[2,4]{1,0} parameter(2)
%broadcast = f32[2,4]{1,0} broadcast(f32[] %alpha), dimensions={}
%multiply = f32[2,4]{1,0} multiply(f32[2,4]{1,0} %broadcast, f32[2,4]{1,0} %x)
ROOT %add = f32[2,4]{1,0} add(f32[2,4]{1,0} %multiply, f32[2,4]{1,0} %y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
ASSERT_TRUE(module->has_schedule());
TF_ASSERT_OK(module->schedule().Verify());
EXPECT_EQ(module->schedule().sequences().size(), 1);
ASSERT_TRUE(
module->schedule().is_computation_scheduled(module->entry_computation()));
EXPECT_THAT(
module->schedule().sequence(module->entry_computation()).instructions(),
ElementsAre(GmockMatch(m::Parameter()), GmockMatch(m::Parameter()),
GmockMatch(m::Parameter()), GmockMatch(m::Broadcast()),
GmockMatch(m::Multiply()), GmockMatch(m::Add())));
}
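// custom-call validation: operand_layout_constraints must match the operand
// count and shapes, and api_version must name a known API version.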
TEST_F(HloParserTest, CustomCallWrongNumberofOperandConstraints) {
const std::string original =
R"(HloModule CustomCallWrongNumberofOperandConstraints
ENTRY %CustomCallWrongNumberofOperandConstraints (p0: f32[42,2,3], p1: f32[123,4]) -> f32[1,2,3] {
%p0 = f32[42,2,3]{0,1,2} parameter(0)
%p1 = f32[123,4]{0,1} parameter(1)
ROOT %custom-call = f32[1,2,3]{0,1,2} custom-call(f32[42,2,3]{0,1,2} %p0, f32[123,4]{0,1} %p1), custom_call_target="baz", operand_layout_constraints={f32[42,2,3]{0,1,2}}
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"Expected 2 operand layout constraints, 1 given");
}
TEST_F(HloParserTest, CustomCallIncompatibleOperandConstraints) {
const std::string original =
R"(HloModule CustomCallIncompatibleOperandConstraints
ENTRY %CustomCallIncompatibleOperandConstraints (p0: f32[42,2,3], p1: f32[123,4]) -> f32[1,2,3] {
%p0 = f32[42,2,3]{0,1,2} parameter(0)
%p1 = f32[123,4]{0,1} parameter(1)
ROOT %custom-call = f32[1,2,3]{0,1,2} custom-call(f32[42,2,3]{0,1,2} %p0, f32[123,4]{0,1} %p1), custom_call_target="baz", operand_layout_constraints={f32[42,2,3]{0,1,2}, f32[555,5]{1,0}}
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"operand 1 is not compatible with operand shape");
}
TEST_F(HloParserTest, CustomCallWithNonexistentVersion) {
const std::string original = R"(HloModule custom_call
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call.1 = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo", api_version=API_VERSION_THAT_DOESNT_EXIST
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"Unknown API version");
}
TEST_F(HloParserTest, CustomCallWithUnspecifiedVersion) {
const std::string original = R"(HloModule custom_call
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call.1 = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo", api_version=API_VERSION_UNSPECIFIED
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"Invalid API version");
}
TEST_F(HloParserTest, AllowShapeWhitespace) {
const std::string text = R"(
HloModule module
ENTRY entry {
ROOT root = f32[ 1, 2,3, 4, 5]{0, 1, 2,3, 4 } parameter(0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
}
TEST_F(HloParserTest, ShapeMismatchInOperand) {
const std::string text = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2,2]) -> f32[2,2] {
%p = f32[2,2] parameter(0)
%constant.1 = f32[2,2] constant({{1, 2}, {3, 4}})
ROOT %add.1 = f32[2,2] add(f32[2,2] %p, f32[2,5] %constant.1)
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(text).status().message(),
"The declared operand shape f32[2,5]{1,0} is not compatible"
" with the shape of the operand instruction f32[2,2]{1,0}.");
}
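// Standalone ParseShape tests: array, tuple, dynamic, and layout-annotated
// shape strings (tiling, element size in bits, memory space, split configs)
// should parse to the expected Shape, and invalid strings should fail.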
TEST_F(HloParserTest, ParseShapeStringR2F32) {
std::string shape_string = "f32[123,456]";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected = ShapeUtil::MakeShape(F32, {123, 456});
ASSERT_TRUE(ShapeUtil::Equal(expected, actual))
<< "expected: " << ShapeUtil::HumanString(expected)
<< "actual: " << ShapeUtil::HumanString(actual);
}
TEST_F(HloParserTest, ParseShapeStringUnbounded) {
std::string shape_string = "f32[?,784]";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected =
ShapeUtil::MakeShape(F32, {Shape::kUnboundedSize, 784}, {true, false});
ASSERT_TRUE(ShapeUtil::Equal(expected, actual))
<< "expected: " << ShapeUtil::HumanString(expected)
<< "actual: " << ShapeUtil::HumanString(actual);
}
TEST_F(HloParserTest, ParseShapeStringTupleOfArrays) {
std::string shape_string = "(f32[1572864],s8[5120,1024])";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected =
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {1572864}),
ShapeUtil::MakeShape(S8, {5120, 1024})});
ASSERT_TRUE(ShapeUtil::Equal(expected, actual))
<< "expected: " << ShapeUtil::HumanString(expected)
<< "actual: " << ShapeUtil::HumanString(actual);
}
TEST_F(HloParserTest, ParseShapeStringNestedTuple) {
std::string shape_string = "(f32[1],(f32[2], token[]), opaque[], f32[3])";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected = ShapeUtil::MakeTupleShape({
ShapeUtil::MakeShape(F32, {1}),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {2}), ShapeUtil::MakeTokenShape()}),
ShapeUtil::MakeOpaqueShape(),
ShapeUtil::MakeShape(F32, {3}),
});
ASSERT_TRUE(ShapeUtil::Equal(expected, actual))
<< "expected: " << ShapeUtil::HumanString(expected)
<< "actual: " << ShapeUtil::HumanString(actual);
}
TEST_F(HloParserTest, ParseShapeStringWithLayout) {
std::string shape_string = "f32[123,456]{0,1}";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected = ShapeUtil::MakeShapeWithDenseLayout(F32, {123, 456}, {0, 1});
ASSERT_TRUE(ShapeUtil::Equal(expected, actual))
<< "expected: " << ShapeUtil::HumanString(expected)
<< "actual: " << ShapeUtil::HumanString(actual);
}
TEST_F(HloParserTest, ParseShapeStringWithTilingLayout) {
std::string shape_string = "f32[123,456]{0,1:T(2,128)}";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected = ShapeUtil::MakeShapeWithDenseLayout(F32, {123, 456}, {0, 1},
{Tile({2, 128})});
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
shape_string = "f32[123,456,789]{0,1,2:T(2, * , 128)}";
TF_ASSERT_OK_AND_ASSIGN(actual, ParseShape(shape_string));
expected = ShapeUtil::MakeShapeWithDenseLayout(
F32, {123, 456, 789}, {0, 1, 2},
{Tile({2, Tile::kCombineDimension, 128})});
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
shape_string = "bf16[123,456,789]{2,1,0:T(2,*,128)(2,1)}";
TF_ASSERT_OK_AND_ASSIGN(actual, ParseShape(shape_string));
expected = ShapeUtil::MakeShapeWithDenseLayout(
BF16, {123, 456, 789}, {2, 1, 0},
{Tile({2, Tile::kCombineDimension, 128}), Tile({2, 1})});
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
shape_string = "f32[123,456,789]{1:T(2, * , 128)}";
auto result = ParseShape(shape_string);
ExpectHasSubstr(result.status().message(),
"Dimensions size is 3, but minor to major size is 1.");
}
TEST_F(HloParserTest, ParseShapeStringWithElementSizeInBits) {
std::string shape_string = "s4[123,456]{1,0:T(2,128)E(4)}";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected = ShapeUtil::MakeShapeWithDenseLayout(S4, {123, 456}, {1, 0},
{Tile({2, 128})}, 1, 4);
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
}
TEST_F(HloParserTest, ParseShapeStringWithMemorySpaceLayout) {
std::string shape_string = "pred[123,456]{1,0:T(2,128)S(3)}";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected = ShapeUtil::MakeShapeWithDenseLayout(
PRED, {123, 456}, {1, 0}, {Tile({2, 128})}, 1, 0, 3);
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
shape_string = "pred[123,456]{1,0:S(3)}";
TF_ASSERT_OK_AND_ASSIGN(actual, ParseShape(shape_string));
expected = ShapeUtil::MakeShapeWithDenseLayout(PRED, {123, 456}, {1, 0}, {},
1, 0, 3);
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
shape_string = "pred[123,456]{1,0:S(3)}";
TF_ASSERT_OK_AND_ASSIGN(actual, ParseShape(shape_string));
expected = ShapeUtil::MakeShapeWithDenseLayout(PRED, {123, 456}, {1, 0}, {},
1, 0, 3);
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
}
TEST_F(HloParserTest, ParseShapeStringWithDynamicShapeMetadataPrefix) {
std::string shape_string = "f32[123,456]{1,0:T(16,128)M(1024)}";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected = ShapeUtil::MakeShapeWithDenseLayout(F32, {123, 456}, {1, 0},
{Tile({16, 128})});
expected.mutable_layout()->set_dynamic_shape_metadata_prefix_bytes(1024);
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
}
TEST_F(HloParserTest, ParseShapeStringWithSplitConfigLayout) {
std::string shape_string = "pred[123,456]{1,0:T(2,128)S(3)SC(1:200)}";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected = ShapeUtil::MakeShapeWithDenseLayout(
PRED, {123, 456}, {1, 0}, {Tile({2, 128})}, 1, 0, 3,
{SplitConfig(1, {200})});
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
shape_string = "pred[123,456]{1,0:S(3)SC(0:10)(1:4,5)}";
TF_ASSERT_OK_AND_ASSIGN(actual, ParseShape(shape_string));
expected = ShapeUtil::MakeShapeWithDenseLayout(
PRED, {123, 456}, {1, 0}, {}, 1, 0, 3,
{SplitConfig(0, {10}), SplitConfig(1, {4, 5})});
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
shape_string = "pred[123,456]{1,0:SC(1:50,200)}";
TF_ASSERT_OK_AND_ASSIGN(actual, ParseShape(shape_string));
expected = ShapeUtil::MakeShapeWithDenseLayout(
PRED, {123, 456}, {1, 0}, {}, 1, 0, 0, {SplitConfig(1, {50, 200})});
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
}
TEST_F(HloParserTest, ParseOpaqueType) {
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape("opaque[]"));
Shape expected = ShapeUtil::MakeOpaqueShape();
ASSERT_TRUE(ShapeUtil::Equal(expected, actual))
<< "expected: " << ShapeUtil::HumanString(expected)
<< "actual: " << ShapeUtil::HumanString(actual);
}
TEST_F(HloParserTest, ParseTokenType) {
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape("token[]"));
Shape expected = ShapeUtil::MakeTokenShape();
ASSERT_TRUE(ShapeUtil::Equal(expected, actual))
<< "expected: " << ShapeUtil::HumanString(expected)
<< "actual: " << ShapeUtil::HumanString(actual);
}
TEST_F(HloParserTest, ParseInvalidShapeString) {
std::string shape_strings[] = {"f32[123,456]foobar{0,1}", "f32[123,456]{foo}",
"f32[123,456]dense{foo}"};
for (const std::string& shape_string : shape_strings) {
absl::StatusOr<Shape> result = ParseShape(shape_string);
ASSERT_FALSE(result.ok()) << "shape: " << shape_string;
}
}
TEST_F(HloParserTest, ParseDynamicArray) {
std::string shape_string = "f32[123,<=456]";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected = ShapeUtil::MakeShape(F32, {123, 456}, {false, true});
ASSERT_TRUE(ShapeUtil::Equal(expected, actual))
<< "expected: " << ShapeUtil::HumanString(expected)
<< "actual: " << ShapeUtil::HumanString(actual);
}
TEST_F(HloParserTest, ParseDynamicTuple) {
std::string shape_string = "(f32[42], u32[<=123,<=456])";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {42}),
ShapeUtil::MakeShape(U32, {123, 456}, {true, true})});
ASSERT_TRUE(ShapeUtil::Equal(expected, actual))
<< "expected: " << ShapeUtil::HumanString(expected)
<< "actual: " << ShapeUtil::HumanString(actual);
}
TEST_F(HloParserTest, ParseInvalidDimLevel) {
constexpr std::string_view shape_string = "f32[123]{0:D(D+~)}";
absl::StatusOr<Shape> result = ParseShape(shape_string);
ASSERT_THAT(
result.status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
testing::HasSubstr(
"invalid DimLevelType/unique/ordered combination in shape")));
}
TEST_F(HloParserTest, NegativeParameterNumber) {
const std::string hlo_string = "par0 = f32[3,5] parameter(-1)";
auto result = ParseAndReturnUnverifiedModule(hlo_string);
ASSERT_FALSE(result.status().ok());
EXPECT_THAT(result.status().message(),
HasSubstr("parameter number must be >= 0"));
}
TEST_F(HloParserTest, DuplicateParameterNumberIsDetected) {
const std::string kHloString = R"(
ENTRY e {
a = s8[] parameter(0)
b = s8[] parameter(0)
ROOT a = s8[] add(a, b)
}
)";
auto result = ParseAndReturnUnverifiedModule(kHloString);
ASSERT_FALSE(result.status().ok());
EXPECT_THAT(result.status().message(),
HasSubstr("Duplicate parameter number 0"));
}
TEST_F(HloParserTest, WrongNumberOfParameterLeafBuffersInReplication) {
const std::string hlo_string =
"par0 = (f32[3,5], f32[]) parameter(0), "
"parameter_replication={true,false,true}";
auto result = ParseAndReturnUnverifiedModule(hlo_string);
ASSERT_FALSE(result.status().ok());
EXPECT_THAT(result.status().message(),
HasSubstr("parameter has 2 leaf buffers, but "
"parameter_replication has 3 elements"));
}
TEST_F(HloParserTest, CheckIndexedConditionalDimension) {
const char* const hlo_string = R"(
HloModule Module
branch0 {
tparam = f32[4] parameter(0)
ROOT tgte1 = f32[4] ceil(tparam)
}
branch1 {
fparam = f32[4] parameter(0)
ROOT fgte1 = f32[4] floor(fparam)
}
ENTRY entry {
p0 = f32[4] parameter(0)
b0 = s32[2] parameter(1)
ROOT conditional = f32[4] conditional(b0, p0, p0),
branch_computations={branch0, branch1}
}
)";
auto result = ParseAndReturnUnverifiedModule(hlo_string);
EXPECT_NE(absl::OkStatus(), result.status());
EXPECT_THAT(result.status().message(),
HasSubstr("The first operand must be a scalar"));
}
TEST_F(HloParserTest, CheckIndexedConditionalElementType) {
const char* const hlo_string = R"(
HloModule Module
branch0 {
tparam = f32[4] parameter(0)
ROOT tgte1 = f32[4] ceil(tparam)
}
branch1 {
fparam = f32[4] parameter(0)
ROOT fgte1 = f32[4] floor(fparam)
}
ENTRY entry {
p0 = f32[4] parameter(0)
b0 = f32[] parameter(1)
ROOT conditional = f32[4] conditional(b0, p0, p0),
branch_computations={branch0, branch1}
}
)";
auto result = ParseAndReturnUnverifiedModule(hlo_string);
EXPECT_NE(absl::OkStatus(), result.status());
EXPECT_THAT(result.status().message(),
HasSubstr("The first operand must be a scalar of PRED or S32"));
}
TEST_F(HloParserTest,
CheckPredicatedConditionalRequiresTrueAndFalseComputation) {
const char* const hlo_string = R"(
HloModule Module
branch0 {
tparam = f32[4] parameter(0)
ROOT tgte1 = f32[4] ceil(tparam)
}
branch1 {
fparam = f32[4] parameter(0)
ROOT fgte1 = f32[4] floor(fparam)
}
ENTRY entry {
p0 = f32[4] parameter(0)
b0 = pred[] parameter(1)
ROOT conditional = f32[4] conditional(b0, p0, p0),
branch_computations={branch0, branch1}
}
)";
auto result = ParseAndReturnUnverifiedModule(hlo_string);
EXPECT_NE(absl::OkStatus(), result.status());
EXPECT_THAT(result.status().message(),
HasSubstr("unexpected attribute \"branch_computations\""));
}
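// Shape inference in the parser: when an instruction's result shape is
// omitted, the parser infers it from the operand shapes.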
TEST_F(HloParserTest, InferUnaryShape) {
constexpr char text[] = R"(HloModule InferUnaryShapeTest
ENTRY InferUnaryShape {
a = f32[2,10]{1,0} parameter(0)
ROOT v = abs(a)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
}
TEST_F(HloParserTest, InferBinaryShape) {
constexpr char text[] = R"(HloModule InferBinaryShapeTest
ENTRY InferBinaryShape {
a = f32[2,10]{1,0} parameter(0)
b = f32[2,10]{1,0} parameter(1)
ROOT sum = add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
EXPECT_TRUE(ShapeUtil::Equal(
module->entry_computation()->ComputeProgramShape().result(),
ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 10}, {1, 0})));
}
TEST_F(HloParserTest, InferTernaryShape) {
constexpr char text[] = R"(HloModule InferTernaryShapeTest
ENTRY InferTernaryShape {
p = pred[] constant(true)
f = s32[] constant(-42)
t = s32[] constant(42)
ROOT select = select(p, f, t)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
EXPECT_TRUE(ShapeUtil::Equal(
module->entry_computation()->ComputeProgramShape().result(),
ShapeUtil::MakeScalarShape(S32)));
}
TEST_F(HloParserTest, TupleTypo) {
constexpr char text[] = R"(HloModule TupleTypoTest
ENTRY TupleTypo {
pow = s32[] constant(42)
ROOT v = (s32[]) tuple(power)
}
)";
auto result = ParseAndReturnVerifiedModule(text);
EXPECT_THAT(result.status(),
tsl::testing::StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("instruction does not exist")));
}
TEST_F(HloParserTest, InferDotShape) {
constexpr char text[] = R"(HloModule InferDotShapeTest
ENTRY InferDotShape {
a = f32[2,10]{1,0} parameter(0)
b = f32[10,2]{1,0} parameter(1)
ROOT dot = dot(a, b), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
EXPECT_TRUE(ShapeUtil::Equal(
module->entry_computation()->ComputeProgramShape().result(),
ShapeUtil::MakeShape(F32, {2}, {0})));
}
TEST_F(HloParserTest, InferSparseDotShape) {
constexpr char text[] = R"(HloModule InferSparseDotShapeTest
ENTRY InferSparseDotShape {
a = f32[2,16]{1,0} parameter(0)
b = f32[32,2]{1,0} parameter(1)
meta = u16[2,2]{1,0} parameter(2)
ROOT dot = dot(a, b, meta), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
EXPECT_TRUE(ShapeUtil::Equal(
module->entry_computation()->ComputeProgramShape().result(),
ShapeUtil::MakeShape(F32, {2}, {0})));
}
TEST_F(HloParserTest, InferTupleShape) {
constexpr char text[] = R"(HloModule InferTupleShapeTest
ENTRY InferTupleShape () -> s32[2,3] {
c0 = f32[3]{0} constant({1, 2, 3})
c1 = s32[2,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 } })
tuple = tuple(c0, c1)
ROOT get = get-tuple-element(tuple), index=1, sharding={maximal device=0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
EXPECT_TRUE(ShapeUtil::Equal(
module->entry_computation()->ComputeProgramShape().result(),
ShapeUtil::MakeShapeWithDenseLayout(S32, {2, 3}, {1, 0})));
}
TEST_F(HloParserTest, InferShapeMixedExplicitShape) {
constexpr char text[] = R"(HloModule InferUnaryShapeTest
Negate {
x = f32[] parameter(0)
ROOT negate = negate(x)
}
Identity {
y = f32[] parameter(0)
ROOT copy = copy(y)
}
ENTRY InferUnaryShape {
a = f32[] parameter(0)
b = f32[] parameter(1)
p = pred[] parameter(2)
c = f32[] add(a, b)
ROOT conditional = conditional(p, a, c), true_computation=Negate, false_computation=Identity
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
EXPECT_TRUE(ShapeUtil::Equal(
module->entry_computation()->ComputeProgramShape().result(),
ShapeUtil::MakeScalarShape(F32)));
}
TEST_F(HloParserTest, CheckAliasPassthroughParams) {
const char* const hlo_string = R"(
HloModule TestModule, alias_passthrough_params=true
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
ROOT root = (f16[2048,1024], f16[2048,1024]) tuple(p0, p1)
}
)";
auto result = ParseAndReturnVerifiedModule(hlo_string);
TF_EXPECT_OK(result.status());
EXPECT_TRUE(result.value()->config().alias_passthrough_params());
}
TEST_F(HloParserTest, CheckReplicaCount) {
const char* const hlo_string = R"(
HloModule TestModule, replica_count=5
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
ROOT root = (f16[2048,1024], f16[2048,1024]) tuple(p0, p1)
}
)";
auto result = ParseAndReturnVerifiedModule(hlo_string);
TF_EXPECT_OK(result.status());
EXPECT_EQ(result.value()->config().replica_count(), 5);
}
TEST_F(HloParserTest, CheckNumPartitions) {
const char* const hlo_string = R"(
HloModule TestModule, num_partitions=3
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
ROOT root = (f16[2048,1024], f16[2048,1024]) tuple(p0, p1)
}
)";
auto result = ParseAndReturnVerifiedModule(hlo_string);
TF_EXPECT_OK(result.status());
EXPECT_EQ(result.value()->config().num_partitions(), 3);
EXPECT_TRUE(result.value()->config().use_spmd_partitioning());
}
TEST_F(HloParserTest, CheckFrontendAttributes) {
const char* const hlo_string = R"(
HloModule TestModule, frontend_attributes={attr_name="attr_value"}
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
ROOT root = (f16[2048,1024], f16[2048,1024]) tuple(p0, p1)
}
)";
auto result = ParseAndReturnVerifiedModule(hlo_string);
TF_EXPECT_OK(result.status());
EXPECT_EQ(result.value()->frontend_attributes().map().size(), 1);
EXPECT_EQ(result.value()->frontend_attributes().map().begin()->first,
"attr_name");
EXPECT_EQ(result.value()->frontend_attributes().map().begin()->second,
"attr_value");
}
TEST_F(HloParserTest, CheckAllowSpmdShardingPropagationToParameters) {
const char* const hlo_string = R"(
HloModule TestModule, allow_spmd_sharding_propagation_to_parameters=true
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
ROOT root = (f16[2048,1024], f16[2048,1024]) tuple(p0, p1)
}
)";
auto result = ParseAndReturnVerifiedModule(hlo_string);
TF_EXPECT_OK(result.status());
EXPECT_EQ((*result)
->config()
.allow_spmd_sharding_propagation_to_parameters()
.size(),
1);
EXPECT_TRUE(
(*result)->config().allow_spmd_sharding_propagation_to_parameters()[0]);
}
TEST_F(HloParserTest, CheckAllowSpmdShardingPropagationToParametersVec) {
const char* const hlo_string = R"(
HloModule TestModule, allow_spmd_sharding_propagation_to_parameters={true,false}
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
ROOT root = (f16[2048,1024], f16[2048,1024]) tuple(p0, p1)
}
)";
auto result = ParseAndReturnVerifiedModule(hlo_string);
TF_EXPECT_OK(result.status());
EXPECT_EQ((*result)
->config()
.allow_spmd_sharding_propagation_to_parameters()
.size(),
2);
EXPECT_TRUE(
(*result)->config().allow_spmd_sharding_propagation_to_parameters()[0]);
EXPECT_FALSE(
(*result)->config().allow_spmd_sharding_propagation_to_parameters()[1]);
}
TEST_F(HloParserTest, CheckAllowSpmdShardingPropagationToOutput) {
const char* const hlo_string = R"(
HloModule TestModule, allow_spmd_sharding_propagation_to_output=true
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
ROOT root = (f16[2048,1024], f16[2048,1024]) tuple(p0, p1)
}
)";
auto result = ParseAndReturnVerifiedModule(hlo_string);
TF_EXPECT_OK(result.status());
EXPECT_EQ(
(*result)->config().allow_spmd_sharding_propagation_to_output().size(),
1);
EXPECT_TRUE(
(*result)->config().allow_spmd_sharding_propagation_to_output()[0]);
}
TEST_F(HloParserTest, CheckAllowSpmdShardingPropagationToOutputVec) {
const char* const hlo_string = R"(
HloModule TestModule, allow_spmd_sharding_propagation_to_output={true,false}
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
ROOT root = (f16[2048,1024], f16[2048,1024]) tuple(p0, p1)
}
)";
auto result = ParseAndReturnVerifiedModule(hlo_string);
TF_EXPECT_OK(result.status());
EXPECT_EQ(
(*result)->config().allow_spmd_sharding_propagation_to_output().size(),
2);
EXPECT_TRUE(
(*result)->config().allow_spmd_sharding_propagation_to_output()[0]);
EXPECT_FALSE(
(*result)->config().allow_spmd_sharding_propagation_to_output()[1]);
}
TEST_F(HloParserTest, NestedBroadcastWithoutDimensionsAttribute) {
const char* const hlo_string = R"(
HloModule test
ENTRY test {
ROOT root = sqrt(f32[10,10] broadcast(f32[10] parameter(0)))
}
)";
auto result = ParseAndReturnVerifiedModule(hlo_string);
EXPECT_NE(absl::OkStatus(), result.status());
EXPECT_THAT(result.status().message(), HasSubstr("dimensions"));
}
TEST_F(HloParserTest, InvalidDimLevelType) {
const std::string original = R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:D(X,C)} parameter(0)
})";
EXPECT_THAT(ParseAndReturnUnverifiedModule(original).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("expected a DimLevelType abbreviation")));
}
TEST_F(HloParserTest, InvalidDimLevelTypeCount) {
const std::string original = R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:D(C)} parameter(0)
})";
EXPECT_THAT(
ParseAndReturnUnverifiedModule(original).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("Dimensions size is 2, but dim level types size is 1")));
}
TEST_F(HloParserTest, RejectSparseTiles) {
const std::string original = R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)T(128,8)} parameter(0)
})";
EXPECT_THAT(ParseAndReturnUnverifiedModule(original).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("Layout has tiles, but is for a sparse array")));
}
TEST_F(HloParserTest, RejectDensePhysicalShape) {
const std::string original = R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:T(128,8)P(f32[10,10])} parameter(0)
})";
EXPECT_THAT(
ParseAndReturnUnverifiedModule(original).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr(
"Layout has physical shape, but is not for a sparse array")));
}
TEST_F(HloParserTest, ParseSingleComputation) {
const std::string original = R"(
test {
ROOT root = f32[1,64,10,128]{1,0,2,3} parameter(0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(original));
EXPECT_TRUE(module->entry_computation()
->ComputeProgramShape()
.parameters()[0]
.has_layout());
EXPECT_TRUE(
module->entry_computation()->ComputeProgramShape().result().has_layout());
EXPECT_EQ(module->entry_computation()
->ComputeProgramShape()
.parameters()[0]
.layout(),
Layout({1, 0, 2, 3}));
EXPECT_EQ(
module->entry_computation()->ComputeProgramShape().result().layout(),
Layout({1, 0, 2, 3}));
}
TEST_F(HloParserTest, ParseComputationNameClosingBrace) {
const std::string original = R"(
test {
ROOT root = f32[1,64,10,128]{1,0,2,3} parameter(0)
}
)";
EXPECT_TRUE(ParseAndReturnUnverifiedModule(original).ok());
}
TEST_F(HloParserTest, ParseSingleEntryComputation) {
const std::string original = R"(
ENTRY test {
ROOT root = f32[1,64,10,128]{1,0,2,3} parameter(0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(original));
EXPECT_TRUE(module->entry_computation()
->ComputeProgramShape()
.parameters()[0]
.has_layout());
EXPECT_TRUE(
module->entry_computation()->ComputeProgramShape().result().has_layout());
EXPECT_EQ(module->entry_computation()
->ComputeProgramShape()
.parameters()[0]
.layout(),
Layout({1, 0, 2, 3}));
EXPECT_EQ(
module->entry_computation()->ComputeProgramShape().result().layout(),
Layout({1, 0, 2, 3}));
}
TEST_F(HloParserTest, ParseMultiComputations) {
const std::string original = R"(
comp1 {
ROOT root = f32[1,64,10,128]{3,2,1,0} parameter(0)
}
comp2 {
ROOT root = f32[1,64,10,128]{1,0,2,3} parameter(0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(original));
EXPECT_TRUE(module->entry_computation()
->ComputeProgramShape()
.parameters()[0]
.has_layout());
EXPECT_TRUE(
module->entry_computation()->ComputeProgramShape().result().has_layout());
EXPECT_EQ(module->entry_computation()
->ComputeProgramShape()
.parameters()[0]
.layout(),
Layout({1, 0, 2, 3}));
EXPECT_EQ(
module->entry_computation()->ComputeProgramShape().result().layout(),
Layout({1, 0, 2, 3}));
}
TEST_F(HloParserTest, ParseMultiComputationsWithEntry) {
const std::string original = R"(
ENTRY comp1 {
ROOT root = f32[1,64,10,128]{1,0,2,3} parameter(0)
}
comp2 {
ROOT root = f32[1,64,10,128]{3,2,1,0} parameter(0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(original));
EXPECT_TRUE(module->entry_computation()
->ComputeProgramShape()
.parameters()[0]
.has_layout());
EXPECT_TRUE(
module->entry_computation()->ComputeProgramShape().result().has_layout());
EXPECT_EQ(module->entry_computation()
->ComputeProgramShape()
.parameters()[0]
.layout(),
Layout({1, 0, 2, 3}));
EXPECT_EQ(
module->entry_computation()->ComputeProgramShape().result().layout(),
Layout({1, 0, 2, 3}));
}
TEST_F(HloParserTest, NontrivialAsyncOpRoundTrip) {
const std::string original = R"(
HloModule module
%async_wrapped {
%async_param.1 = s32[1024]{0} parameter(0)
%copy = s32[1024]{0} copy(s32[1024]{0} %async_param.1)
%async_param.2 = s32[256]{0} parameter(1)
%async_param.3 = s32[] parameter(2)
ROOT %dus = s32[1024]{0} dynamic-update-slice(s32[1024]{0} %copy, s32[256]{0} %async_param.2, s32[] %async_param.3)
}
ENTRY %main {
%input.5 = s32[] parameter(1)
%broadcast = s32[1024]{0} broadcast(s32[] %input.5), dimensions={}
%input.0 = s32[256]{0} parameter(0)
%async-start = ((s32[1024]{0}, s32[256]{0}, s32[]), s32[1024]{0}, u32[]) async-start(%broadcast, %input.0, %input.5), calls=%async_wrapped
ROOT %async-done = s32[1024]{0} async-done(((s32[1024]{0}, s32[256]{0}, s32[]), s32[1024]{0}, u32[]) %async-start), calls=%async_wrapped
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(original));
TF_ASSERT_OK_AND_ASSIGN(
auto roundtrip_module,
ParseAndReturnUnverifiedModule(module->ToString(
HloPrintOptions().set_syntax_sugar_async_ops(true))));
auto fp_options = HloPrintOptions::Fingerprint();
EXPECT_EQ(roundtrip_module->ToString(fp_options),
module->ToString(fp_options));
}
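// Lexer-level check: LexesAsJsonDict should accept balanced, JSON-like brace
// groups (including quoted braces) and reject unbalanced or trailing input.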
TEST_F(HloParserTest, LexesAsJsonDict) {
EXPECT_TRUE(LexesAsJsonDict("{}"));
EXPECT_TRUE(LexesAsJsonDict("{abc: 123}"));
EXPECT_TRUE(LexesAsJsonDict("{{abc: 123}, {{{d}}}}"));
EXPECT_TRUE(LexesAsJsonDict(R"({"}"})"));
EXPECT_TRUE(LexesAsJsonDict(R"({"\"}"})"));
EXPECT_TRUE(LexesAsJsonDict(R"({"\"{"})"));
EXPECT_FALSE(LexesAsJsonDict(""));
EXPECT_FALSE(LexesAsJsonDict("{"));
EXPECT_FALSE(LexesAsJsonDict("}"));
EXPECT_FALSE(LexesAsJsonDict("{{}"));
EXPECT_FALSE(LexesAsJsonDict("{}}"));
EXPECT_FALSE(LexesAsJsonDict("{}a"));
EXPECT_FALSE(LexesAsJsonDict("a{}"));
EXPECT_FALSE(LexesAsJsonDict("{{{{}}}"));
}
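// The async-op tests below cover parser errors for malformed async op shapes,
// mismatched or shared wrapped computations, wrong wrapped opcodes for the
// syntax-sugar forms, and non-default execution threads.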
TEST_F(HloParserTest, AsyncStartMissingOperandWrapper) {
const char* const hlo_string = R"(
HloModule Module
async_computation {
p = f32[2,3] parameter(0)
ROOT custom-call = f32[3,2] custom-call(p), custom_call_target="foo"
}
ENTRY AsyncStartMissingOperandWrapper {
p0 = f32[2,3] parameter(0)
async-start = (f32[2,3], f32[3,2], s32[]) async-start(p0), calls=async_computation
async-update = ((f32[2,3]), f32[3,2], s32[]) async-update(async-start), calls=async_computation
ROOT async-done = f32[3,2] async-done(async-update), calls=async_computation
}
)";
EXPECT_THAT(
ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("AsyncStart and AsyncUpdate expect the op shape to be "
"in the form of "
"((async-operands), async-outputs, state).")));
}
TEST_F(HloParserTest, AsyncUpdateMissingOperandWrapper) {
const char* const hlo_string = R"(
HloModule Module
async_computation {
p = f32[2,3] parameter(0)
ROOT custom-call = f32[3,2] custom-call(p), custom_call_target="foo"
}
ENTRY AsyncUpdateMissingOperandWrapper {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3]), f32[3,2], s32[]) async-start(p0), calls=async_computation
async-update = (f32[2,3], f32[3,2], s32[]) async-update(async-start), calls=async_computation
ROOT async-done = f32[3,2] async-done(async-update), calls=async_computation
}
)";
EXPECT_THAT(
ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("AsyncStart and AsyncUpdate expect the op shape to be "
"in the form of "
"((async-operands), async-outputs, state).")));
}
TEST_F(HloParserTest, AsyncOpTupleWrongType) {
const char* const hlo_string = R"(
HloModule Module
async_computation {
p = f32[2,3] parameter(0)
ROOT custom-call = f32[3,2] custom-call(p), custom_call_target="foo"
}
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3])) async-start(p0), calls=async_computation
ROOT async-done = f32[3,2] async-done(async-start), calls=async_computation
}
)";
EXPECT_THAT(
ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("AsyncStart and AsyncUpdate expect the op shape to be "
"in the form of "
"((async-operands), async-outputs, state).")));
}
TEST_F(HloParserTest, AsyncDoneNoAsyncStart) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
p1 = u32[] parameter(1)
tuple = ((f32[2,3]), f32[2,3], u32[]) tuple(p0, p0, p1)
ROOT async-done = f32[2,3] custom-call-done(tuple)
}
)";
EXPECT_THAT(
ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("AsyncUpdate and AsyncDone expect their operand to be "
"the previous async op.")));
}
TEST_F(HloParserTest, AsyncUpdateAndAsyncDoneNoAsyncStart) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
p1 = u32[] parameter(1)
tuple = ((f32[2,3]), f32[2,3], u32[]) tuple(p0, p0, p1)
async-update = ((f32[2,3]), f32[2,3], u32[]) custom-call-update(tuple)
ROOT async-done = f32[2,3] custom-call-done(tuple)
}
)";
EXPECT_THAT(
ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("AsyncUpdate and AsyncDone expect their operand to be "
"the previous async op.")));
}
TEST_F(HloParserTest, AsyncUpdateWithSyntaxSugarWrongOp) {
const char* const hlo_string = R"(
HloModule AsyncUpdateWithSyntaxSugarWrongOp
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%async-start = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-start(f32[10]{0} %p0), custom_call_target="foo"
%async-update = ((f32[10]{0}), f32[20]{0}, s32[]) add-update(((f32[10]{0}), f32[20]{0}, s32[]) %async-start)
ROOT %async-done = f32[20]{0} custom-call-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-update)
}
)";
EXPECT_THAT(ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("Expect async wrapped opcode to be custom-call, "
"but got add")));
}
TEST_F(HloParserTest, AsyncDoneWithSyntaxSugarWrongOp) {
const char* const hlo_string = R"(
HloModule AsyncUpdateWithSyntaxSugarWrongOp
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%async-start = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-start(f32[10]{0} %p0), custom_call_target="foo"
%async-update = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-update(((f32[10]{0}), f32[20]{0}, s32[]) %async-start)
ROOT %async-done = f32[20]{0} add-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-update)
}
)";
EXPECT_THAT(ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("Expect async wrapped opcode to be custom-call, "
"but got add")));
}
TEST_F(HloParserTest, AsyncOpSharedComputation) {
const char* const hlo_string = R"(
HloModule AsyncOpSharedComputation
%async_wrapped (async_param: f32[10]) -> f32[20] {
%async_param = f32[10]{0} parameter(0)
ROOT %call = f32[20]{0} custom-call(f32[10]{0} %async_param), custom_call_target="foo"
}
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%async-start.0 = ((f32[10]{0}), f32[20]{0}, s32[]) async-start(f32[10]{0} %p0), calls=%async_wrapped
%async-done.0 = f32[20]{0} async-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-start.0)
%async-start.1 = ((f32[10]{0}), f32[20]{0}, s32[]) async-start(f32[10]{0} %p0), calls=%async_wrapped
ROOT %async-done.1 = f32[20]{0} async-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-start.1)
}
)";
EXPECT_THAT(ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("Computation async_wrapped is already referenced "
"by another async op")));
}
TEST_F(HloParserTest, AsyncUpdateWrongComputation) {
const char* const hlo_string = R"(
HloModule AsyncUpdateWrongComputation
%async_wrapped.0 (async_param: f32[10]) -> f32[20] {
%async_param = f32[10]{0} parameter(0)
ROOT %custom-call = f32[20]{0} custom-call(f32[10]{0} %async_param), custom_call_target="foo"
}
%async_wrapped.1 (async_param: f32[10]) -> f32[20] {
%async_param = f32[10]{0} parameter(0)
ROOT %custom-call = f32[20]{0} custom-call(f32[10]{0} %async_param), custom_call_target="foo"
}
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%async-start = ((f32[10]{0}), f32[20]{0}, s32[]) async-start(f32[10]{0} %p0), calls=%async_wrapped.0
%async-update = ((f32[10]{0}), f32[20]{0}, s32[]) async-update(((f32[10]{0}), f32[20]{0}, s32[]) %async-start), calls=%async_wrapped.1
ROOT %async-done = f32[20]{0} async-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-update)
}
)";
EXPECT_THAT(
ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("Expect async_wrapped_computation to be async_wrapped.0, "
"but got async_wrapped.1")));
}
TEST_F(HloParserTest, AsyncDoneWrongComputation) {
const char* const hlo_string = R"(
HloModule AsyncDoneWrongComputation
%async_wrapped.0 (async_param: f32[10]) -> f32[20] {
%async_param = f32[10]{0} parameter(0)
ROOT %custom-call = f32[20]{0} custom-call(f32[10]{0} %async_param), custom_call_target="foo"
}
%async_wrapped.1 (async_param: f32[10]) -> f32[20] {
%async_param = f32[10]{0} parameter(0)
ROOT %custom-call = f32[20]{0} custom-call(f32[10]{0} %async_param), custom_call_target="foo"
}
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%async-start = ((f32[10]{0}), f32[20]{0}, s32[]) async-start(f32[10]{0} %p0), calls=%async_wrapped.0
%async-update = ((f32[10]{0}), f32[20]{0}, s32[]) async-update(((f32[10]{0}), f32[20]{0}, s32[]) %async-start)
ROOT %async-done = f32[20]{0} async-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-update), calls=%async_wrapped.1
}
)";
EXPECT_THAT(
ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("Expect async_wrapped_computation to be async_wrapped.0, "
"but got async_wrapped.1")));
}
TEST_F(HloParserTest, AsyncUpdateWrongDefaultThread) {
const char* const hlo_string = R"(
HloModule AsyncUpdateWrongDefaultThread
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%async-start = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-start(f32[10]{0} %p0), custom_call_target="foo"
%async-update = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-update(((f32[10]{0}), f32[20]{0}, s32[]) %async-start), async_execution_thread="foo_thread"
ROOT %async-done = f32[20]{0} custom-call-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-update)
}
)";
EXPECT_THAT(ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("Expect async_execution_thread to be main, "
"but got foo_thread")));
}
TEST_F(HloParserTest, AsyncDoneWrongDefaultThread) {
const char* const hlo_string = R"(
HloModule AsyncDoneWrongDefaultThread
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%async-start = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-start(f32[10]{0} %p0), custom_call_target="foo"
%async-update = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-update(((f32[10]{0}), f32[20]{0}, s32[]) %async-start)
ROOT %async-done = f32[20]{0} custom-call-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-update), async_execution_thread="foo_thread"
}
)";
EXPECT_THAT(ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("Expect async_execution_thread to be main, "
"but got foo_thread")));
}
TEST_F(HloParserTest, PipelinedSendRecv) {
const std::string hlo_string = R"(
HloModule test
cond {
param = (u32[], (u32[2], u32[], token[]), (u32[2], u32[], token[])) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(1)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], (u32[2], u32[], token[]), (u32[2], u32[], token[])) parameter(0)
count = get-tuple-element(%param), index=0
recv.0 = (u32[2], u32[], token[]) get-tuple-element(param), index=1
recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
recv-data.0 = u32[2] get-tuple-element(recv-done.0), index=0
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
send.0 = (u32[2], u32[], token[]) get-tuple-element(param), index=2
send-done.0 = (u32[2], token[]) recv-done(send.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
after-all.0.n = token[] after-all()
recv.0.n = (u32[2], u32[], token[]) recv(after-all.0.n), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}",
_xla_send_recv_pipeline="0"
}
after-all.1.n = token[] after-all()
send.0.n = (u32[2], u32[], token[]) send(recv-data.0, after-all.1.n),
channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}",
_xla_send_recv_pipeline="0"
}
ROOT result = (u32[], (u32[2], u32[], token[]), (u32[2], u32[], token[])) tuple(new_count, recv.0.n, send.0.n)
}
ENTRY test_computation {
c0 = u32[] constant(0)
init = u32[2] broadcast(c0), dimensions={}
after-all.0.p = token[] after-all()
recv.0.p = (u32[2], u32[], token[]) recv(after-all.0.p), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}",
_xla_send_recv_pipeline="0"
}
after-all.1.p = token[] after-all()
send.0.p = (u32[2], u32[], token[]) send(init, after-all.1.p),
channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}",
_xla_send_recv_pipeline="0"
}
while_init = (u32[], (u32[2], u32[], token[]), (u32[2], u32[], token[])) tuple(c0, recv.0.p, send.0.p)
while_result = (u32[], (u32[2], u32[], token[]), (u32[2], u32[], token[])) while(while_init), body=body, condition=cond
recv.0.q = (u32[2], u32[], token[]) get-tuple-element(while_result), index=1
recv-done.0.q = (u32[2], token[]) recv-done(recv.0.q), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send.0.q = (u32[2], u32[], token[]) get-tuple-element(while_result), index=2
send-done.0.q = token[] send-done(send.0.q), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
ROOT recv-data.0.q = u32[2] get-tuple-element(recv-done.0.q), index=0
})";
auto result = ParseAndReturnUnverifiedModule(hlo_string);
EXPECT_EQ(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, ReplicaIdWithLayout) {
const char* const hlo_string = R"(
HloModule ReplicaId
ENTRY ReplicaId {
ROOT replica-id.18600 = u32[]{:T(128)} replica-id()
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
EXPECT_TRUE(
module->entry_computation()->root_instruction()->shape().has_layout());
EXPECT_FALSE(module->entry_computation()
->root_instruction()
->shape()
.layout()
.tiles()
.empty());
}
TEST_F(HloParserTest, OriginalValueWithoutShape) {
const std::string hlo_string = R"(HloModule test
ENTRY %test {
%a = f32[2,10]{1,0} parameter(0), origin={{"a"}}
ROOT %v = abs(%a), origin={{"v"}}
}
)";
EXPECT_THAT(ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("expects instruction shape")));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/parser/hlo_parser.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/parser/hlo_parser_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7e81246b-0365-4530-a3d8-790f88c0fd3b | cpp | tensorflow/tensorflow | unsorted_segment | tensorflow/lite/kernels/unsorted_segment.cc | tensorflow/lite/kernels/unsorted_segment_test.cc | #include <stdint.h>
#include <algorithm>
#include <functional>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace unsorted_segment {
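// Shared kernel implementation for the UNSORTED_SEGMENT_{MAX,MIN,PROD,SUM}
// builtin operators.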
enum SegmentType {
kSegmentMax,
kSegmentMin,
kSegmentProd,
kSegmentSum,
};
static const int kInputDataTensor = 0;
static const int kInputSegmentIdsTensor = 1;
static const int kInputNumSegmentsTensor = 2;
static const int kOutputTensor = 0;
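// Output shape rule: segment_ids must be a prefix of data's shape; the output
// has rank data_rank - segment_ids_rank + 1, with dims
// [num_segments, <remaining data dims>].
// Example: data [2,3,4], segment_ids [2,3], num_segments 5 -> output [5,4].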
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
const TfLiteTensor* data,
const TfLiteTensor* segment_ids,
const TfLiteTensor* num_segments,
TfLiteTensor* output) {
const int segment_ids_rank = NumDimensions(segment_ids);
const int data_rank = NumDimensions(data);
TF_LITE_ENSURE(context, segment_ids_rank <= data_rank);
for (int i = 0; i < segment_ids_rank; ++i) {
TF_LITE_ENSURE_EQ(context, segment_ids->dims->data[i], data->dims->data[i]);
}
TF_LITE_ENSURE(context, (num_segments->dims->size == 1 &&
num_segments->dims->data[0] == 1) ||
num_segments->dims->size == 0);
int32_t num_segments_ = GetTensorData<int32_t>(num_segments)[0];
const int num_segment_ids = NumElements(segment_ids);
int max_index = -1;
for (int i = 0; i < num_segment_ids; i++) {
max_index = std::max(GetTensorData<int32_t>(segment_ids)[i], max_index);
}
TF_LITE_ENSURE(context, max_index < num_segments_);
const int output_rank = data_rank - segment_ids_rank + 1;
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank);
output_shape->data[0] = num_segments_;
for (int i = segment_ids_rank; i < data_rank; ++i) {
output_shape->data[i - segment_ids_rank + 1] = data->dims->data[i];
}
return context->ResizeTensor(context, output, output_shape);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* data;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputDataTensor, &data));
const TfLiteTensor* segment_ids;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputSegmentIdsTensor,
&segment_ids));
const TfLiteTensor* num_segments;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, kInputNumSegmentsTensor, &num_segments));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE(context,
data->type == kTfLiteInt32 || data->type == kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, segment_ids->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, num_segments->type, kTfLiteInt32);
if (IsDynamicTensor(data) || !IsConstantOrPersistentTensor(segment_ids) ||
!IsConstantOrPersistentTensor(num_segments)) {
SetTensorToDynamic(output);
return kTfLiteOk;
}
return ResizeOutputTensor(context, data, segment_ids, num_segments, output);
}
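// Reduction functors: each supplies the combining operation and the initial
// value that output segments are filled with before accumulation.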
template <typename T>
struct SegmenMax {
inline T operator()(const T& a, const T& b) const { return std::max(a, b); }
static constexpr T kInitialValue = std::numeric_limits<T>::lowest();
};
template <typename T>
struct SegmenMin {
inline T operator()(const T& a, const T& b) const { return std::min(a, b); }
static constexpr T kInitialValue = std::numeric_limits<T>::max();
};
template <typename T>
struct SegmenProd {
inline T operator()(const T& a, const T& b) const { return a * b; }
static constexpr T kInitialValue = T(1);
};
template <typename T>
struct SegmenSum {
inline T operator()(const T& a, const T& b) const { return a + b; }
static constexpr T kInitialValue = T(0);
};
template <typename T>
TfLiteStatus EvalType(TfLiteContext* context, const RuntimeShape& input_shape,
const T* input_data,
const RuntimeShape& segment_ids_shape,
const int32_t* segment_ids_data,
const RuntimeShape& output_shape, T* output_data,
SegmentType segment_type) {
switch (segment_type) {
case kSegmentProd:
reference_ops::UnsortedSegmentRef<T, SegmenProd>(
input_shape, input_data, segment_ids_shape, segment_ids_data,
output_shape, output_data);
break;
case kSegmentMax:
reference_ops::UnsortedSegmentRef<T, SegmenMax>(
input_shape, input_data, segment_ids_shape, segment_ids_data,
output_shape, output_data);
break;
case kSegmentSum:
reference_ops::UnsortedSegmentRef<T, SegmenSum>(
input_shape, input_data, segment_ids_shape, segment_ids_data,
output_shape, output_data);
break;
case kSegmentMin:
reference_ops::UnsortedSegmentRef<T, SegmenMin>(
input_shape, input_data, segment_ids_shape, segment_ids_data,
output_shape, output_data);
break;
default:
TF_LITE_KERNEL_LOG(context, "Not recognized segment type: %d",
segment_type);
return kTfLiteError;
}
return kTfLiteOk;
}
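// Fetches the inputs, resizes the output if it is dynamic, and dispatches on
// the data tensor type (int32 or float32).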
TfLiteStatus EvalGeneric(TfLiteContext* context, TfLiteNode* node,
SegmentType segment_type) {
const TfLiteTensor* data;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputDataTensor, &data));
const TfLiteTensor* segment_ids;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputSegmentIdsTensor,
&segment_ids));
const TfLiteTensor* num_segments;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, kInputNumSegmentsTensor, &num_segments));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, data, segment_ids,
num_segments, output));
}
TF_LITE_ENSURE_EQ(context, GetTensorShape(data).Dims(0),
GetTensorShape(segment_ids).Dims(0));
#define TF_LITE_UNSORTED_SEGMENT(dtype) \
EvalType<dtype>(context, GetTensorShape(data), GetTensorData<dtype>(data), \
GetTensorShape(segment_ids), \
GetTensorData<int32_t>(segment_ids), GetTensorShape(output), \
GetTensorData<dtype>(output), segment_type);
switch (data->type) {
case kTfLiteInt32:
TF_LITE_UNSORTED_SEGMENT(int32_t);
break;
case kTfLiteFloat32:
TF_LITE_UNSORTED_SEGMENT(float);
break;
default:
TF_LITE_KERNEL_LOG(
context, "Currently UnsortedSegment doesn't support data type: %s",
TfLiteTypeGetName(data->type));
return kTfLiteError;
}
#undef TF_LITE_UNSORTED_SEGMENT
return kTfLiteOk;
}
TfLiteStatus EvalProd(TfLiteContext* context, TfLiteNode* node) {
return EvalGeneric(context, node, kSegmentProd);
}
TfLiteStatus EvalMax(TfLiteContext* context, TfLiteNode* node) {
return EvalGeneric(context, node, kSegmentMax);
}
TfLiteStatus EvalSum(TfLiteContext* context, TfLiteNode* node) {
return EvalGeneric(context, node, kSegmentSum);
}
TfLiteStatus EvalMin(TfLiteContext* context, TfLiteNode* node) {
return EvalGeneric(context, node, kSegmentMin);
}
}
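// Registration entry points for the four unsorted-segment builtin ops.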
TfLiteRegistration* Register_UNSORTED_SEGMENT_PROD() {
static TfLiteRegistration r = {nullptr, nullptr, unsorted_segment::Prepare,
unsorted_segment::EvalProd};
return &r;
}
TfLiteRegistration* Register_UNSORTED_SEGMENT_MAX() {
static TfLiteRegistration r = {nullptr, nullptr, unsorted_segment::Prepare,
unsorted_segment::EvalMax};
return &r;
}
TfLiteRegistration* Register_UNSORTED_SEGMENT_SUM() {
static TfLiteRegistration r = {nullptr, nullptr, unsorted_segment::Prepare,
unsorted_segment::EvalSum};
return &r;
}
TfLiteRegistration* Register_UNSORTED_SEGMENT_MIN() {
static TfLiteRegistration r = {nullptr, nullptr, unsorted_segment::Prepare,
unsorted_segment::EvalMin};
return &r;
}
}
}
} | #include "tensorflow/lite/kernels/unsorted_segment_test.h"
#include <limits.h>
#include <stdint.h>
#include <initializer_list>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
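// These TEST_P cases are parameterized over the four unsorted-segment ops; the
// fixture (declared in unsorted_segment_test.h) is assumed to provide the
// getModel/getConstModel helpers used below.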
TEST_P(UnsortedSegmentTest, SegmentIdsSizeNotEqualToDataFirstDimensionFails) {
UnsortedSegmentModel<int32_t> model =
getModel({TensorType_INT32, {3, 2}}, {TensorType_INT32, {2}},
{TensorType_INT32, {1}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<int32_t>(model.segment_ids(), {0, 1});
model.PopulateTensor<int32_t>(model.num_segments(), {2});
ASSERT_EQ(model.Invoke(), kTfLiteError);
}
TEST_P(UnsortedSegmentTest,
LargestSegmentIdPlusOneGreaterThanNumSegmentsFails) {
UnsortedSegmentModel<int32_t> model =
getModel({TensorType_INT32, {2, 2}}, {TensorType_INT32, {2}},
{TensorType_INT32, {1}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4});
model.PopulateTensor<int32_t>(model.segment_ids(), {0, 1});
model.PopulateTensor<int32_t>(model.num_segments(), {1});
ASSERT_EQ(model.Invoke(), kTfLiteError);
}
TEST_P(UnsortedSegmentTest, NumSegmentsNotScalarShapeFails) {
UnsortedSegmentModel<int32_t> model =
getModel({TensorType_INT32, {3, 2}}, {TensorType_INT32, {3}},
{TensorType_INT32, {2}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<int32_t>(model.segment_ids(), {0, 1, 0});
model.PopulateTensor<int32_t>(model.num_segments(), {2, 1});
ASSERT_EQ(model.Invoke(), kTfLiteError);
}
TEST_P(UnsortedSegmentTest, Rank2SegIdsNotPrefixFails) {
UnsortedSegmentModel<int32_t> model =
getModel({TensorType_INT32, {2, 2, 2}}, {TensorType_INT32, {2, 1}},
{TensorType_INT32, {1}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<int32_t>(model.segment_ids(), {1, 1});
model.PopulateTensor<int32_t>(model.num_segments(), {3});
ASSERT_EQ(model.Invoke(), kTfLiteError);
}
TEST_P(UnsortedSegmentTest, Rank2SegIdsHasShapeNumSegDataShapeSuffix) {
UnsortedSegmentModel<int32_t> model =
getModel({TensorType_INT32, {2, 2, 2}}, {TensorType_INT32, {2, 2}},
{TensorType_INT32, {1}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<int32_t>(model.segment_ids(), {1, 2, 0, 8});
model.PopulateTensor<int32_t>(model.num_segments(), {10});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), testing::ElementsAreArray({10, 2}));
}
TEST_P(UnsortedSegmentTest, Rank2SegIdsHasShapeNumSegDataShapeSuffixConst) {
UnsortedSegmentModel<int32_t> model = getConstModel(
{TensorType_INT32, {2, 2, 2}}, {1, 2, -1, -1}, {2, 2}, {3}, {1});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4, 5, 6});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), testing::ElementsAreArray({3, 2}));
}
TEST_P(UnsortedSegmentTest, SegIdsHasSameShapeAsData2d) {
UnsortedSegmentModel<int32_t> model =
getModel({TensorType_INT32, {2, 2}}, {TensorType_INT32, {2, 2}},
{TensorType_INT32, {1}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4});
model.PopulateTensor<int32_t>(model.segment_ids(), {0, 1, 5, 2, 4});
model.PopulateTensor<int32_t>(model.num_segments(), {10});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), testing::ElementsAreArray({10}));
}
TEST_P(UnsortedSegmentTest, SegIdsHasSameShapeAsData2dConst) {
UnsortedSegmentModel<int32_t> model =
getConstModel({TensorType_INT32, {2, 2}}, {1, 1, 1, 1}, {2, 2}, {3}, {1});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), testing::ElementsAreArray({3}));
}
TEST_P(UnsortedSegmentTest, SegIdsHasSameShapeAsData3d) {
UnsortedSegmentModel<int32_t> model =
getModel({TensorType_INT32, {2, 2, 2}}, {TensorType_INT32, {2, 2, 2}},
{TensorType_INT32, {1}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4, 5, 6, 7, 8});
model.PopulateTensor<int32_t>(model.segment_ids(), {1, 2, 3, 4, 5, 6, 7, 8});
model.PopulateTensor<int32_t>(model.num_segments(), {10});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), testing::ElementsAreArray({10}));
}
TEST_P(UnsortedSegmentTest, SegIdsHasSameShapeAsData3dConst) {
UnsortedSegmentModel<int32_t> model =
getConstModel({TensorType_INT32, {2, 2, 2}}, {0, 1, 2, -1, 3, -1, 4, -1},
{2, 2, 2}, {8}, {1});
model.PopulateTensor<int32_t>(model.data(), {1, 1, 1, 1, 1, 1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), testing::ElementsAreArray({8}));
}
TEST_P(UnsortedSegmentTest, Data5dHasShapeNumSegDataShapeSuffix) {
UnsortedSegmentModel<int32_t> model =
getModel({TensorType_INT32, {2, 1, 2, 1, 2}}, {TensorType_INT32, {2, 1}},
{TensorType_INT32, {1}});
model.PopulateTensor<int32_t>(model.data(), {1, 2, 3, 4, 5, 6, 7, 8});
model.PopulateTensor(model.segment_ids(), {0, 1});
model.PopulateTensor(model.num_segments(), {10});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), testing::ElementsAreArray({10, 2, 1, 2}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unsorted_segment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unsorted_segment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
93fad05c-1bed-42af-b8bd-1cf21c7c02af | cpp | tensorflow/tensorflow | model_cmdline_flags | tensorflow/lite/toco/model_cmdline_flags.cc | tensorflow/lite/toco/model_cmdline_flags_test.cc | #include "tensorflow/lite/toco/model_cmdline_flags.h"
#include <string>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/command_line_flags.h"
#include "tensorflow/lite/toco/args.h"
#include "tensorflow/lite/toco/toco_graphviz_dump_options.h"
#include "tensorflow/lite/toco/toco_port.h"
#ifdef PLATFORM_GOOGLE
#include "base/commandlineflags.h"
#endif
namespace toco {
bool ParseModelFlagsFromCommandLineFlags(
int* argc, char* argv[], std::string* msg,
ParsedModelFlags* parsed_model_flags_ptr) {
ParsedModelFlags& parsed_flags = *parsed_model_flags_ptr;
using tensorflow::Flag;
std::vector<tensorflow::Flag> flags = {
Flag("input_array", parsed_flags.input_array.bind(),
parsed_flags.input_array.default_value(),
"Deprecated: use --input_arrays instead. Name of the input array. "
"If not specified, will try to read "
"that information from the input file."),
Flag("input_arrays", parsed_flags.input_arrays.bind(),
parsed_flags.input_arrays.default_value(),
"Names of the input arrays, comma-separated. If not specified, "
"will try to read that information from the input file."),
Flag("output_array", parsed_flags.output_array.bind(),
parsed_flags.output_array.default_value(),
"Deprecated: use --output_arrays instead. Name of the output array, "
"when specifying a unique output array. "
"If not specified, will try to read that information from the "
"input file."),
Flag("output_arrays", parsed_flags.output_arrays.bind(),
parsed_flags.output_arrays.default_value(),
"Names of the output arrays, comma-separated. "
"If not specified, will try to read "
"that information from the input file."),
Flag("input_shape", parsed_flags.input_shape.bind(),
parsed_flags.input_shape.default_value(),
"Deprecated: use --input_shapes instead. Input array shape. For "
"many models the shape takes the form "
"batch size, input array height, input array width, input array "
"depth."),
Flag("input_shapes", parsed_flags.input_shapes.bind(),
parsed_flags.input_shapes.default_value(),
"Shapes corresponding to --input_arrays, colon-separated. For "
"many models each shape takes the form batch size, input array "
"height, input array width, input array depth."),
Flag("batch_size", parsed_flags.batch_size.bind(),
parsed_flags.batch_size.default_value(),
"Deprecated. Batch size for the model. Replaces the first dimension "
"of an input size array if undefined. Use only with SavedModels "
"when --input_shapes flag is not specified. Always use "
"--input_shapes flag with frozen graphs."),
Flag("input_data_type", parsed_flags.input_data_type.bind(),
parsed_flags.input_data_type.default_value(),
"Deprecated: use --input_data_types instead. Input array type, if "
"not already provided in the graph. "
"Typically needs to be specified when passing arbitrary arrays "
"to --input_arrays."),
Flag("input_data_types", parsed_flags.input_data_types.bind(),
parsed_flags.input_data_types.default_value(),
"Input arrays types, comma-separated, if not already provided in "
"the graph. "
"Typically needs to be specified when passing arbitrary arrays "
"to --input_arrays."),
Flag("mean_value", parsed_flags.mean_value.bind(),
parsed_flags.mean_value.default_value(),
"Deprecated: use --mean_values instead. mean_value parameter for "
"image models, used to compute input "
"activations from input pixel data."),
Flag("mean_values", parsed_flags.mean_values.bind(),
parsed_flags.mean_values.default_value(),
"mean_values parameter for image models, comma-separated list of "
"doubles, used to compute input activations from input pixel "
"data. Each entry in the list should match an entry in "
"--input_arrays."),
Flag("std_value", parsed_flags.std_value.bind(),
parsed_flags.std_value.default_value(),
"Deprecated: use --std_values instead. std_value parameter for "
"image models, used to compute input "
"activations from input pixel data."),
Flag("std_values", parsed_flags.std_values.bind(),
parsed_flags.std_values.default_value(),
"std_value parameter for image models, comma-separated list of "
"doubles, used to compute input activations from input pixel "
"data. Each entry in the list should match an entry in "
"--input_arrays."),
Flag("variable_batch", parsed_flags.variable_batch.bind(),
parsed_flags.variable_batch.default_value(),
"If true, the model accepts an arbitrary batch size. Mutually "
"exclusive "
"with the 'batch' field: at most one of these two fields can be "
"set."),
Flag("rnn_states", parsed_flags.rnn_states.bind(),
parsed_flags.rnn_states.default_value(), ""),
Flag("model_checks", parsed_flags.model_checks.bind(),
parsed_flags.model_checks.default_value(),
"A list of model checks to be applied to verify the form of the "
"model. Applied after the graph transformations after import."),
Flag("dump_graphviz", parsed_flags.dump_graphviz.bind(),
parsed_flags.dump_graphviz.default_value(),
"Dump graphviz during LogDump call. If string is non-empty then "
"it defines path to dump, otherwise will skip dumping."),
Flag("dump_graphviz_video", parsed_flags.dump_graphviz_video.bind(),
parsed_flags.dump_graphviz_video.default_value(),
"If true, will dump graphviz at each "
"graph transformation, which may be used to generate a video."),
Flag("conversion_summary_dir", parsed_flags.conversion_summary_dir.bind(),
parsed_flags.conversion_summary_dir.default_value(),
"Local file directory to store the conversion logs."),
Flag("allow_nonexistent_arrays",
parsed_flags.allow_nonexistent_arrays.bind(),
parsed_flags.allow_nonexistent_arrays.default_value(),
"If true, will allow passing inexistent arrays in --input_arrays "
"and --output_arrays. This makes little sense, is only useful to "
"more easily get graph visualizations."),
Flag("allow_nonascii_arrays", parsed_flags.allow_nonascii_arrays.bind(),
parsed_flags.allow_nonascii_arrays.default_value(),
"If true, will allow passing non-ascii-printable characters in "
"--input_arrays and --output_arrays. By default (if false), only "
"ascii printable characters are allowed, i.e. character codes "
"ranging from 32 to 127. This is disallowed by default so as to "
"catch common copy-and-paste issues where invisible unicode "
"characters are unwittingly added to these strings."),
Flag(
"arrays_extra_info_file", parsed_flags.arrays_extra_info_file.bind(),
parsed_flags.arrays_extra_info_file.default_value(),
"Path to an optional file containing a serialized ArraysExtraInfo "
"proto allowing to pass extra information about arrays not specified "
"in the input model file, such as extra MinMax information."),
Flag("model_flags_file", parsed_flags.model_flags_file.bind(),
parsed_flags.model_flags_file.default_value(),
"Path to an optional file containing a serialized ModelFlags proto. "
"Options specified on the command line will override the values in "
"the proto."),
Flag("change_concat_input_ranges",
parsed_flags.change_concat_input_ranges.bind(),
parsed_flags.change_concat_input_ranges.default_value(),
"Boolean to change the behavior of min/max ranges for inputs and"
" output of the concat operators."),
};
bool asked_for_help =
*argc == 2 && (!strcmp(argv[1], "--help") || !strcmp(argv[1], "-help"));
if (asked_for_help) {
*msg += tensorflow::Flags::Usage(argv[0], flags);
return false;
} else {
if (!tensorflow::Flags::Parse(argc, argv, flags)) return false;
}
auto& dump_options = *GraphVizDumpOptions::singleton();
dump_options.dump_graphviz_video = parsed_flags.dump_graphviz_video.value();
dump_options.dump_graphviz = parsed_flags.dump_graphviz.value();
return true;
}
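// Copies parsed command-line values into the ModelFlags proto, optionally
// merging a serialized proto from --model_flags_file, and checks that the
// singular-form input flags (--input_array, --input_shape, ...) are not mixed
// with the plural-form ones (--input_arrays, --input_shapes, ...).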
void ReadModelFlagsFromCommandLineFlags(
const ParsedModelFlags& parsed_model_flags, ModelFlags* model_flags) {
toco::port::CheckInitGoogleIsDone("InitGoogle is not done yet");
if (parsed_model_flags.model_flags_file.specified()) {
std::string model_flags_file_contents;
QCHECK(port::file::GetContents(parsed_model_flags.model_flags_file.value(),
&model_flags_file_contents,
port::file::Defaults())
.ok())
<< "Specified --model_flags_file="
<< parsed_model_flags.model_flags_file.value()
<< " was not found or could not be read";
QCHECK(ParseFromStringEitherTextOrBinary(model_flags_file_contents,
model_flags))
<< "Specified --model_flags_file="
<< parsed_model_flags.model_flags_file.value()
<< " could not be parsed";
}
#ifdef PLATFORM_GOOGLE
CHECK(!((base::WasPresentOnCommandLine("batch") &&
parsed_model_flags.variable_batch.specified())))
<< "The --batch and --variable_batch flags are mutually exclusive.";
#endif
CHECK(!(parsed_model_flags.output_array.specified() &&
parsed_model_flags.output_arrays.specified()))
<< "The --output_array and --vs flags are mutually exclusive.";
if (parsed_model_flags.output_array.specified()) {
model_flags->add_output_arrays(parsed_model_flags.output_array.value());
}
if (parsed_model_flags.output_arrays.specified()) {
std::vector<std::string> output_arrays =
absl::StrSplit(parsed_model_flags.output_arrays.value(), ',');
for (const std::string& output_array : output_arrays) {
model_flags->add_output_arrays(output_array);
}
}
const bool uses_single_input_flags =
parsed_model_flags.input_array.specified() ||
parsed_model_flags.mean_value.specified() ||
parsed_model_flags.std_value.specified() ||
parsed_model_flags.input_shape.specified();
const bool uses_multi_input_flags =
parsed_model_flags.input_arrays.specified() ||
parsed_model_flags.mean_values.specified() ||
parsed_model_flags.std_values.specified() ||
parsed_model_flags.input_shapes.specified();
QCHECK(!(uses_single_input_flags && uses_multi_input_flags))
<< "Use either the singular-form input flags (--input_array, "
"--input_shape, --mean_value, --std_value) or the plural form input "
"flags (--input_arrays, --input_shapes, --mean_values, --std_values), "
"but not both forms within the same command line.";
if (parsed_model_flags.input_array.specified()) {
QCHECK(uses_single_input_flags);
model_flags->add_input_arrays()->set_name(
parsed_model_flags.input_array.value());
}
if (parsed_model_flags.input_arrays.specified()) {
QCHECK(uses_multi_input_flags);
for (const auto& input_array :
absl::StrSplit(parsed_model_flags.input_arrays.value(), ',')) {
model_flags->add_input_arrays()->set_name(std::string(input_array));
}
}
if (parsed_model_flags.mean_value.specified()) {
QCHECK(uses_single_input_flags);
model_flags->mutable_input_arrays(0)->set_mean_value(
parsed_model_flags.mean_value.value());
}
if (parsed_model_flags.mean_values.specified()) {
QCHECK(uses_multi_input_flags);
std::vector<std::string> mean_values =
absl::StrSplit(parsed_model_flags.mean_values.value(), ',');
QCHECK(static_cast<int>(mean_values.size()) ==
model_flags->input_arrays_size());
for (size_t i = 0; i < mean_values.size(); ++i) {
char* last = nullptr;
model_flags->mutable_input_arrays(i)->set_mean_value(
strtod(mean_values[i].data(), &last));
CHECK(last != mean_values[i].data());
}
}
if (parsed_model_flags.std_value.specified()) {
QCHECK(uses_single_input_flags);
model_flags->mutable_input_arrays(0)->set_std_value(
parsed_model_flags.std_value.value());
}
if (parsed_model_flags.std_values.specified()) {
QCHECK(uses_multi_input_flags);
std::vector<std::string> std_values =
absl::StrSplit(parsed_model_flags.std_values.value(), ',');
QCHECK(static_cast<int>(std_values.size()) ==
model_flags->input_arrays_size());
for (size_t i = 0; i < std_values.size(); ++i) {
char* last = nullptr;
model_flags->mutable_input_arrays(i)->set_std_value(
strtod(std_values[i].data(), &last));
CHECK(last != std_values[i].data());
}
}
if (parsed_model_flags.input_data_type.specified()) {
QCHECK(uses_single_input_flags);
IODataType type;
QCHECK(IODataType_Parse(parsed_model_flags.input_data_type.value(), &type));
model_flags->mutable_input_arrays(0)->set_data_type(type);
}
if (parsed_model_flags.input_data_types.specified()) {
QCHECK(uses_multi_input_flags);
std::vector<std::string> input_data_types =
absl::StrSplit(parsed_model_flags.input_data_types.value(), ',');
QCHECK(static_cast<int>(input_data_types.size()) ==
model_flags->input_arrays_size());
for (size_t i = 0; i < input_data_types.size(); ++i) {
IODataType type;
QCHECK(IODataType_Parse(input_data_types[i], &type));
model_flags->mutable_input_arrays(i)->set_data_type(type);
}
}
if (parsed_model_flags.input_shape.specified()) {
QCHECK(uses_single_input_flags);
if (model_flags->input_arrays().empty()) {
model_flags->add_input_arrays();
}
auto* shape = model_flags->mutable_input_arrays(0)->mutable_shape();
shape->clear_dims();
const IntList& list = parsed_model_flags.input_shape.value();
for (auto& dim : list.elements) {
shape->add_dims(dim);
}
}
if (parsed_model_flags.input_shapes.specified()) {
QCHECK(uses_multi_input_flags);
std::vector<std::string> input_shapes =
absl::StrSplit(parsed_model_flags.input_shapes.value(), ':');
QCHECK(static_cast<int>(input_shapes.size()) ==
model_flags->input_arrays_size());
for (size_t i = 0; i < input_shapes.size(); ++i) {
auto* shape = model_flags->mutable_input_arrays(i)->mutable_shape();
shape->clear_dims();
if (input_shapes[i].empty()) {
continue;
}
for (const auto& dim_str : absl::StrSplit(input_shapes[i], ',')) {
int size;
CHECK(absl::SimpleAtoi(dim_str, &size))
<< "Failed to parse input_shape: " << input_shapes[i];
shape->add_dims(size);
}
}
}
#define READ_MODEL_FLAG(name) \
do { \
if (parsed_model_flags.name.specified()) { \
model_flags->set_##name(parsed_model_flags.name.value()); \
} \
} while (false)
READ_MODEL_FLAG(variable_batch);
#undef READ_MODEL_FLAG
for (const auto& element : parsed_model_flags.rnn_states.value().elements) {
auto* rnn_state_proto = model_flags->add_rnn_states();
for (const auto& kv_pair : element) {
const std::string& key = kv_pair.first;
const std::string& value = kv_pair.second;
if (key == "state_array") {
rnn_state_proto->set_state_array(value);
} else if (key == "back_edge_source_array") {
rnn_state_proto->set_back_edge_source_array(value);
} else if (key == "size") {
int32_t size = 0;
CHECK(absl::SimpleAtoi(value, &size));
CHECK_GT(size, 0);
rnn_state_proto->set_size(size);
} else if (key == "num_dims") {
int32_t size = 0;
CHECK(absl::SimpleAtoi(value, &size));
CHECK_GT(size, 0);
rnn_state_proto->set_num_dims(size);
} else {
LOG(FATAL) << "Unknown key '" << key << "' in --rnn_states";
}
}
CHECK(rnn_state_proto->has_state_array() &&
rnn_state_proto->has_back_edge_source_array() &&
rnn_state_proto->has_size())
<< "--rnn_states must include state_array, back_edge_source_array and "
"size.";
}
for (const auto& element : parsed_model_flags.model_checks.value().elements) {
auto* model_check_proto = model_flags->add_model_checks();
for (const auto& kv_pair : element) {
const std::string& key = kv_pair.first;
const std::string& value = kv_pair.second;
if (key == "count_type") {
model_check_proto->set_count_type(value);
} else if (key == "count_min") {
int32_t count = 0;
CHECK(absl::SimpleAtoi(value, &count));
CHECK_GE(count, -1);
model_check_proto->set_count_min(count);
} else if (key == "count_max") {
int32_t count = 0;
CHECK(absl::SimpleAtoi(value, &count));
CHECK_GE(count, -1);
model_check_proto->set_count_max(count);
} else {
LOG(FATAL) << "Unknown key '" << key << "' in --model_checks";
}
}
}
if (!model_flags->has_allow_nonascii_arrays()) {
model_flags->set_allow_nonascii_arrays(
parsed_model_flags.allow_nonascii_arrays.value());
}
if (!model_flags->has_allow_nonexistent_arrays()) {
model_flags->set_allow_nonexistent_arrays(
parsed_model_flags.allow_nonexistent_arrays.value());
}
if (!model_flags->has_change_concat_input_ranges()) {
model_flags->set_change_concat_input_ranges(
parsed_model_flags.change_concat_input_ranges.value());
}
if (parsed_model_flags.arrays_extra_info_file.specified()) {
std::string arrays_extra_info_file_contents;
CHECK(port::file::GetContents(
parsed_model_flags.arrays_extra_info_file.value(),
&arrays_extra_info_file_contents, port::file::Defaults())
.ok());
ParseFromStringEitherTextOrBinary(arrays_extra_info_file_contents,
model_flags->mutable_arrays_extra_info());
}
}
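// Lazily-created process-wide ParsedModelFlags singleton; aborts if it is
// first accessed with must_already_exist=true, i.e. before ParseModelFlagsOrDie
// has initialized it.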
ParsedModelFlags* UncheckedGlobalParsedModelFlags(bool must_already_exist) {
static auto* flags = [must_already_exist]() {
if (must_already_exist) {
fprintf(stderr, __FILE__
":"
"GlobalParsedModelFlags() used without initialization\n");
fflush(stderr);
abort();
}
return new toco::ParsedModelFlags;
}();
return flags;
}
ParsedModelFlags* GlobalParsedModelFlags() {
return UncheckedGlobalParsedModelFlags(true);
}
void ParseModelFlagsOrDie(int* argc, char* argv[]) {
auto* flags = UncheckedGlobalParsedModelFlags(false);
std::string msg;
bool model_success =
toco::ParseModelFlagsFromCommandLineFlags(argc, argv, &msg, flags);
if (!model_success || !msg.empty()) {
fprintf(stderr, "%s", msg.c_str());
fflush(stderr);
abort();
}
}
} | #include <string>
#include <unordered_map>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/toco/args.h"
#include "tensorflow/lite/toco/model_cmdline_flags.h"
namespace toco {
namespace {
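// Checks that --rnn_states parses comma-separated {key:value,...} groups into
// the expected list of string maps.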
TEST(ModelCmdlineFlagsTest, ParseArgsStringMapList) {
int args_count = 3;
const char* args[] = {
"toco", "--input_arrays=input_1",
"--rnn_states={state_array:rnn/BasicLSTMCellZeroState/zeros,"
"back_edge_source_array:rnn/basic_lstm_cell/Add_1,size:4},"
"{state_array:rnn/BasicLSTMCellZeroState/zeros_1,"
"back_edge_source_array:rnn/basic_lstm_cell/Mul_2,size:4}",
nullptr};
std::string expected_input_arrays = "input_1";
std::vector<std::unordered_map<std::string, std::string>> expected_rnn_states;
expected_rnn_states.push_back(
{{"state_array", "rnn/BasicLSTMCellZeroState/zeros"},
{"back_edge_source_array", "rnn/basic_lstm_cell/Add_1"},
{"size", "4"}});
expected_rnn_states.push_back(
{{"state_array", "rnn/BasicLSTMCellZeroState/zeros_1"},
{"back_edge_source_array", "rnn/basic_lstm_cell/Mul_2"},
{"size", "4"}});
std::string message;
ParsedModelFlags result_flags;
EXPECT_TRUE(ParseModelFlagsFromCommandLineFlags(
&args_count, const_cast<char**>(args), &message, &result_flags));
EXPECT_EQ(result_flags.input_arrays.value(), expected_input_arrays);
EXPECT_EQ(result_flags.rnn_states.value().elements, expected_rnn_states);
}
}
}
int main(int argc, char** argv) {
::tflite::LogToStderr();
::testing::InitGoogleTest(&argc, argv);
::toco::port::InitGoogleWasDoneElsewhere();
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/model_cmdline_flags.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/model_cmdline_flags_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4f18fb34-6236-41ee-9f97-e4b07328de32 | cpp | tensorflow/tensorflow | gather_nd_op | tensorflow/core/kernels/gather_nd_op.cc | tensorflow/core/kernels/gather_nd_op_test.cc | #define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/gather_nd_op.h"
#include <string>
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/bad_indices_policy.h"
namespace tensorflow {
namespace {
constexpr char kBadIndicesPolicyAttr[] = "bad_indices_policy";
}
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
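// GatherNd kernel: gathers slices of `params` addressed by the trailing
// dimension of `indices`. The optional "bad_indices_policy" attribute selects
// how out-of-range indices are handled (error vs. ignore).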
template <typename Device, typename T, typename Index>
class GatherNdOp : public OpKernel {
public:
explicit GatherNdOp(OpKernelConstruction* c) : OpKernel(c) {
const DataType dt = DataTypeToEnum<T>::v();
const DataType index_t = DataTypeToEnum<Index>::v();
OP_REQUIRES_OK(c, c->MatchSignature({dt, index_t}, {dt}));
    if (c->HasAttr(kBadIndicesPolicyAttr)) {
      std::string bad_indices_policy_str;
      OP_REQUIRES_OK(
          c, c->GetAttr(kBadIndicesPolicyAttr, &bad_indices_policy_str));
absl::StatusOr<BadIndicesPolicy> bad_indices_policy =
BadIndicesPolicyFromString(bad_indices_policy_str);
OP_REQUIRES_OK(c, bad_indices_policy.status());
bad_indices_policy_ = *bad_indices_policy;
}
}
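  // The gather itself is delegated to functor::DoGatherNd, which also applies
  // the configured bad-indices policy.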
void Compute(OpKernelContext* c) override {
const Tensor& params = c->input(0);
const Tensor& indices = c->input(1);
Tensor out;
OP_REQUIRES_OK(c, functor::DoGatherNd<Device, T, Index>(
c, params, indices, &out, bad_indices_policy_));
c->set_output(0, out);
}
private:
BadIndicesPolicy bad_indices_policy_ = BadIndicesPolicy::kDefault;
};
#define REGISTER_GATHER_ND_FULL(dev, type, index_type) \
REGISTER_KERNEL_BUILDER( \
Name("GatherNd") \
.Device(DEVICE_##dev) \
.TypeConstraint<type>("Tparams") \
.TypeConstraint<index_type>("Tindices") \
.AttrConstraint<std::string>( \
"bad_indices_policy", \
{"", "DEFAULT", "ERROR", "IGNORE"}), \
GatherNdOp<dev##Device, type, index_type>)
#define REGISTER_GATHER_ND_CPU(type) \
REGISTER_GATHER_ND_FULL(CPU, type, int16); \
REGISTER_GATHER_ND_FULL(CPU, type, int32); \
REGISTER_GATHER_ND_FULL(CPU, type, int64_t)
TF_CALL_ALL_TYPES(REGISTER_GATHER_ND_CPU);
TF_CALL_QUANTIZED_TYPES(REGISTER_GATHER_ND_CPU);
TF_CALL_float8_e5m2(REGISTER_GATHER_ND_CPU);
TF_CALL_float8_e4m3fn(REGISTER_GATHER_ND_CPU);
#undef REGISTER_GATHER_ND_CPU
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
namespace functor {
#define DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, NDIM) \
template <> \
Index GatherNdSlice<GPUDevice, T, Index, NDIM>::operator()( \
const GPUDevice& d, const Index slice_size, \
typename TTypes<int32>::Scalar Tscratch, \
typename TTypes<T, NDIM + 1>::ConstTensor Tparams, \
typename TTypes<Index>::ConstMatrix Tindices, \
typename TTypes<T>::Matrix Tout); \
extern template struct GatherNdSlice<GPUDevice, T, Index, NDIM>;
#define DECLARE_GPU_SPECS_INDEX(T, Index) \
DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 0); \
DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 1); \
DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 2); \
DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 3); \
DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 4); \
DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 5); \
DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 6); \
DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 7);
#define DECLARE_GPU_SPECS(T) \
DECLARE_GPU_SPECS_INDEX(T, int32); \
DECLARE_GPU_SPECS_INDEX(T, int64_t)
TF_CALL_int32(DECLARE_GPU_SPECS);
TF_CALL_int64(DECLARE_GPU_SPECS);
TF_CALL_GPU_NUMBER_TYPES(DECLARE_GPU_SPECS);
TF_CALL_COMPLEX_TYPES(DECLARE_GPU_SPECS);
#undef DECLARE_GPU_SPECS
#undef DECLARE_GPU_SPECS_INDEX
}
#undef REGISTER_GATHER_ND_FULL
#define REGISTER_GATHER_ND_FULL(dev, type, index_type) \
REGISTER_KERNEL_BUILDER( \
Name("GatherNd") \
.Device(DEVICE_##dev) \
.TypeConstraint<type>("Tparams") \
.TypeConstraint<index_type>("Tindices") \
.AttrConstraint<std::string>("bad_indices_policy", \
{"", "DEFAULT", "IGNORE"}), \
GatherNdOp<dev##Device, type, index_type>)
#define REGISTER_GATHER_ND_GPU(type) \
REGISTER_GATHER_ND_FULL(GPU, type, int32); \
REGISTER_GATHER_ND_FULL(GPU, type, int64_t)
TF_CALL_int32(REGISTER_GATHER_ND_GPU);
TF_CALL_int64(REGISTER_GATHER_ND_GPU);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_GATHER_ND_GPU);
TF_CALL_COMPLEX_TYPES(REGISTER_GATHER_ND_GPU);
#undef REGISTER_GATHER_ND_GPU
#endif
#undef REGISTER_GATHER_ND_FULL
} | #include <functional>
#include <memory>
#include <vector>
#include "absl/strings/match.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace test {
namespace graph {
class Node* GatherNd(Graph* g, class Node* in0, class Node* in1) {
class Node* ret;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "GatherNd")
.Input(in0)
.Input(in1)
.Finalize(g, &ret));
return ret;
}
}
}
namespace {
class GatherNdOpTest : public OpsTestBase {
protected:
void MakeOp(DataType param_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "GatherNd")
.Input(FakeInput(param_type))
.Input(FakeInput(index_type))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(GatherNdOpTest, Simple) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({5}), {0, 1, 2, 8, 4});
AddInputFromArray<int32>(TensorShape({2, 1}), {3, 4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&expected, {8, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(GatherNdOpTest, Error_OutOfRange) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({5}), {0, 1, 2, 8, 4});
AddInputFromArray<int32>(TensorShape({2, 1}), {3, 5});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(
s.message(), "indices[1] = [5] does not index into param shape [5]"))
<< s.message();
}
TEST_F(GatherNdOpTest, Quantized_UINT8) {
MakeOp(DT_QUINT8, DT_INT32);
AddInputFromArray<quint8>(TensorShape({5}), {0, 1, 2, 8, 4});
AddInputFromArray<int32>(TensorShape({2, 1}), {3, 4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QUINT8, TensorShape({2}));
test::FillValues<quint8>(&expected, {8, 4});
test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
}
TEST_F(GatherNdOpTest, Quantized_INT8) {
MakeOp(DT_QINT8, DT_INT32);
AddInputFromArray<qint8>(TensorShape({5}), {0, 1, 2, 8, 4});
AddInputFromArray<int32>(TensorShape({2, 1}), {3, 4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT8, TensorShape({2}));
test::FillValues<qint8>(&expected, {8, 4});
test::ExpectTensorEqual<qint8>(expected, *GetOutput(0));
}
class GatherNdOpIgnoreBadIndicesTest : public OpsTestBase {
protected:
void MakeOp(DataType param_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "GatherNd")
.Input(FakeInput(param_type))
.Input(FakeInput(index_type))
.Attr("bad_indices_policy", "IGNORE")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(GatherNdOpIgnoreBadIndicesTest, IgnoreOutOfRange) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({5}), {9, 1, 2, 8, 4});
AddInputFromArray<int32>(TensorShape({3, 1}), {3, 5, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected, {8, 0, 1});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
class GatherNdOpConstructionTest : public OpsTestBase {};
TEST_F(GatherNdOpConstructionTest, Error_BadIndicesPolicyInvalid) {
TF_ASSERT_OK(NodeDefBuilder("myop", "GatherNd")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("bad_indices_policy", "AN_UNRECOGNIZED_POLICY")
.Finalize(node_def()));
EXPECT_NE(InitOp(), absl::OkStatus());
}
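// Benchmark helper: builds a graph that performs kLookups GatherNd lookups of
// random 4-D coordinates from a float tensor of shape {dim, 8, 16, 32}.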
constexpr int kLookups = 2000;
template <typename Index>
static Graph* GatherNd(int dim) {
Graph* g = new Graph(OpRegistry::Global());
Tensor params(DT_FLOAT, TensorShape({dim, 8, 16, 32}));
params.flat<float>().setRandom();
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
Tensor indices(DataTypeToEnum<Index>::value, TensorShape({kLookups, 4}));
auto indices_mat = indices.matrix<Index>();
for (int i = 0; i < kLookups; i++) {
indices_mat(i, 0) = rnd.Uniform(dim);
indices_mat(i, 1) = rnd.Uniform(8);
indices_mat(i, 2) = rnd.Uniform(16);
indices_mat(i, 3) = rnd.Uniform(32);
}
test::graph::GatherNd(g, test::graph::Constant(g, params),
test::graph::Constant(g, indices));
return g;
}
#define BM_GATHER_ND(DEVICE, INDEX) \
static void BM_##DEVICE##_gather_nd_##INDEX( \
::testing::benchmark::State& state) { \
const int dim = state.range(0); \
test::Benchmark(#DEVICE, GatherNd<INDEX>(dim), \
false) \
.Run(state); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * kLookups * 4; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(float)); \
} \
BENCHMARK(BM_##DEVICE##_gather_nd_##INDEX) \
->UseRealTime() \
->Arg(10) \
->Arg(100) \
->Arg(1000) \
->Arg(10000)
BM_GATHER_ND(cpu, int32);
BM_GATHER_ND(gpu, int32);
BM_GATHER_ND(cpu, int64_t);
BM_GATHER_ND(gpu, int64_t);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/gather_nd_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/gather_nd_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ac17a735-6f0a-4a24-a913-698d9809b098 | cpp | tensorflow/tensorflow | conv_padding_legalization | third_party/xla/xla/service/gpu/transforms/conv_padding_legalization.cc | third_party/xla/xla/service/gpu/transforms/conv_padding_legalization_test.cc | #include "xla/service/gpu/transforms/conv_padding_legalization.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
bool IsForwardConvolutionCanonical(const HloInstruction& conv) {
CHECK(conv.custom_call_target() == kCudnnConvForwardCallTarget ||
conv.custom_call_target() ==
kCudnnConvBiasActivationForwardCallTarget ||
conv.custom_call_target() == kCudnnConvForwardGraphCallTarget);
return window_util::HasSymmetricPadding(conv.window()) &&
!window_util::HasNegativePadding(conv.window()) &&
!window_util::HasDilation(conv.window());
}
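// Rewrites the convolution input so that the window becomes canonical:
// asymmetric or base-dilated padding is materialized as an explicit kPad on
// the input, and negative padding is turned into a slice. The window is
// updated in place to reflect the rewritten operand.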
HloInstruction* MaybePaddedAndSlicedInput(
Window* conv_window, const ConvolutionDimensionNumbers& conv_dnums,
HloInstruction* input) {
HloComputation* computation = input->parent();
if (!window_util::HasSymmetricPadding(*conv_window) ||
window_util::HasBaseDilation(*conv_window)) {
PaddingConfig padding_config =
MakeNoPaddingConfig(input->shape().dimensions_size());
for (size_t i = 0; i < conv_dnums.input_spatial_dimensions().size(); ++i) {
int64_t dim = conv_dnums.input_spatial_dimensions(i);
if (conv_window->dimensions(i).padding_low() > 0) {
padding_config.mutable_dimensions(dim)->set_edge_padding_low(
conv_window->dimensions(i).padding_low());
conv_window->mutable_dimensions(i)->set_padding_low(0);
}
if (conv_window->dimensions(i).padding_high() > 0) {
padding_config.mutable_dimensions(dim)->set_edge_padding_high(
conv_window->dimensions(i).padding_high());
conv_window->mutable_dimensions(i)->set_padding_high(0);
}
if (conv_window->dimensions(i).base_dilation() != 1) {
padding_config.mutable_dimensions(dim)->set_interior_padding(
conv_window->dimensions(i).base_dilation() - 1);
conv_window->mutable_dimensions(i)->set_base_dilation(1);
}
}
PrimitiveType element_type = input->shape().element_type();
HloInstruction* padding = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(element_type)));
input =
MakePadHlo(input, padding, padding_config, &input->metadata()).value();
}
if (window_util::HasNegativePadding(*conv_window)) {
std::vector<int64_t> start_indices(input->shape().dimensions_size(), 0);
std::vector<int64_t> limit_indices(input->shape().dimensions().begin(),
input->shape().dimensions().end());
std::vector<int64_t> strides(input->shape().dimensions_size(), 1);
for (size_t i = 0; i < conv_dnums.input_spatial_dimensions().size(); ++i) {
int64_t dim = conv_dnums.input_spatial_dimensions(i);
if (conv_window->dimensions(i).padding_low() < 0) {
start_indices[dim] += -conv_window->dimensions(i).padding_low();
conv_window->mutable_dimensions(i)->set_padding_low(0);
}
if (conv_window->dimensions(i).padding_high() < 0) {
limit_indices[dim] -= -conv_window->dimensions(i).padding_high();
conv_window->mutable_dimensions(i)->set_padding_high(0);
}
}
input = MakeSliceHlo(input, start_indices, limit_indices, strides).value();
}
return input;
}
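// Converts window dilation into explicit interior padding of the kernel
// (dilation - 1 zeros between taps); the caller then resets the window
// dilation to 1.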
HloInstruction* MaybePaddedKernel(const Window& conv_window,
const ConvolutionDimensionNumbers& conv_dnums,
HloInstruction* kernel) {
if (!window_util::HasWindowDilation(conv_window)) {
return kernel;
}
PaddingConfig padding_config;
padding_config.mutable_dimensions()->Reserve(
kernel->shape().dimensions_size());
for (size_t i = 0; i < kernel->shape().dimensions_size(); ++i) {
padding_config.add_dimensions();
}
for (size_t i = 0; i < conv_dnums.kernel_spatial_dimensions().size(); ++i) {
int64_t dim = conv_dnums.kernel_spatial_dimensions(i);
padding_config.mutable_dimensions(dim)->set_interior_padding(
conv_window.dimensions(i).window_dilation() - 1);
}
HloComputation* computation = kernel->parent();
PrimitiveType element_type = kernel->shape().element_type();
HloInstruction* padding = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(element_type)));
return MakePadHlo(kernel, padding, padding_config, &kernel->metadata())
.value();
}
}
bool ConvPaddingLegalization::CanonicalizeForwardConvolution(
HloInstruction* conv) {
if (IsForwardConvolutionCanonical(*conv)) {
return false;
}
Window new_conv_window = conv->window();
HloInstruction* new_input = MaybePaddedAndSlicedInput(
&new_conv_window, conv->convolution_dimension_numbers(),
conv->mutable_operand(0));
HloInstruction* new_kernel =
MaybePaddedKernel(new_conv_window, conv->convolution_dimension_numbers(),
conv->mutable_operand(1));
for (size_t i = 0; i < new_conv_window.dimensions_size(); ++i) {
WindowDimension* dim = new_conv_window.mutable_dimensions(i);
dim->set_size(new_kernel->shape().dimensions(
conv->convolution_dimension_numbers().kernel_spatial_dimensions(i)));
dim->set_window_dilation(1);
}
VLOG(1) << "Canonicalizing forward conv";
std::vector<HloInstruction*> operands(conv->operands().begin(),
conv->operands().end());
operands[0] = new_input;
operands[1] = new_kernel;
auto new_conv = conv->parent()->AddInstruction(
conv->CloneWithNewOperands(conv->shape(), operands));
new_conv->set_window(new_conv_window);
VLOG(1) << "Replacing:\n " << conv->ToString() << "\nwith:\n "
<< new_conv->ToString();
TF_CHECK_OK(conv->parent()->ReplaceInstruction(conv, new_conv));
return true;
}
namespace {
void IncreasePaddingLowBy(int64_t delta, WindowDimension* window_dim) {
window_dim->set_padding_low(window_dim->padding_low() + delta);
}
void IncreasePaddingHighBy(int64_t delta, WindowDimension* window_dim) {
window_dim->set_padding_high(window_dim->padding_high() + delta);
}
}
bool ConvPaddingLegalization::CanonicalizeBackwardFilterConvolution(
HloInstruction* backward_conv) {
CHECK_EQ(backward_conv->custom_call_target(),
kCudnnConvBackwardFilterCallTarget);
if (window_util::HasSymmetricPadding(backward_conv->window())) {
return false;
}
HloInstruction* input = backward_conv->mutable_operand(0);
Window new_backward_conv_window = backward_conv->window();
PaddingConfig input_padding_config =
MakeNoPaddingConfig(input->shape().rank());
ConvolutionDimensionNumbers backward_conv_dnums =
backward_conv->convolution_dimension_numbers();
for (size_t i = 0; i < backward_conv->window().dimensions_size(); ++i) {
int64_t padding_low = backward_conv->window().dimensions(i).padding_low();
int64_t padding_high = backward_conv->window().dimensions(i).padding_high();
if (padding_low < 0 || padding_high < 0) {
return false;
}
int64_t new_conv_padding = std::min(padding_low, padding_high);
int64_t dim = backward_conv_dnums.input_spatial_dimensions(i);
input_padding_config.mutable_dimensions(dim)->set_edge_padding_low(
padding_low - new_conv_padding);
input_padding_config.mutable_dimensions(dim)->set_edge_padding_high(
padding_high - new_conv_padding);
auto* new_dim = new_backward_conv_window.mutable_dimensions(i);
new_dim->set_padding_low(new_conv_padding);
new_dim->set_padding_high(new_conv_padding);
}
HloComputation* computation = backward_conv->parent();
HloInstruction* output = backward_conv->mutable_operand(1);
HloInstruction* padding =
computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(input->shape().element_type())));
HloInstruction* padded_input =
MakePadHlo(input, padding, input_padding_config).value();
HloInstruction* new_backward_conv =
computation->AddInstruction(backward_conv->CloneWithNewOperands(
backward_conv->shape(), {padded_input, output}));
new_backward_conv->set_window(new_backward_conv_window);
VLOG(1) << "Canonicalizing backward filter conv";
VLOG(1) << "Replacing:\n " << backward_conv->ToString() << "\nwith:\n "
<< new_backward_conv->ToString();
TF_CHECK_OK(
computation->ReplaceInstruction(backward_conv, new_backward_conv));
return true;
}
bool ConvPaddingLegalization::CanonicalizeBackwardInputConvolution(
HloInstruction* backward_conv) {
if (window_util::HasSymmetricPadding(backward_conv->window())) {
return false;
}
Window new_backward_conv_window = backward_conv->window();
ConvolutionDimensionNumbers backward_conv_dnums =
backward_conv->convolution_dimension_numbers();
Shape backward_conv_shape = backward_conv->shape().tuple_shapes(0);
Shape new_backward_conv_shape = backward_conv_shape;
for (size_t i = 0; i < backward_conv->window().dimensions_size(); ++i) {
int64_t padding_low = backward_conv->window().dimensions(i).padding_low();
int64_t padding_high = backward_conv->window().dimensions(i).padding_high();
if (padding_low < 0 || padding_high < 0) {
return false;
}
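    // Symmetrize the padding by shrinking the larger side down to the smaller
    // one (the delta below is non-positive); the extra output produced this
    // way is added to the result shape and sliced off afterwards.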
if (padding_low > padding_high) {
IncreasePaddingLowBy(padding_high - padding_low,
new_backward_conv_window.mutable_dimensions(i));
} else if (padding_low < padding_high) {
IncreasePaddingHighBy(padding_low - padding_high,
new_backward_conv_window.mutable_dimensions(i));
}
int64_t dim = backward_conv_dnums.input_spatial_dimensions(i);
new_backward_conv_shape.set_dimensions(
dim, new_backward_conv_shape.dimensions(dim) +
std::abs(padding_low - padding_high));
}
HloComputation* computation = backward_conv->parent();
HloInstruction* output = backward_conv->mutable_operand(0);
HloInstruction* filter = backward_conv->mutable_operand(1);
HloInstruction* new_backward_conv_call =
computation->AddInstruction(backward_conv->CloneWithNewOperands(
ShapeUtil::MakeTupleShape(
{new_backward_conv_shape, ShapeUtil::MakeShape(U8, {0})}),
{output, filter}));
new_backward_conv_call->set_window(new_backward_conv_window);
HloInstruction* new_backward_conv =
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
new_backward_conv_shape, new_backward_conv_call, 0));
HloInstruction* new_backward_conv_scratch =
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
new_backward_conv_call->shape().tuple_shapes(1),
new_backward_conv_call, 1));
std::vector<int64_t> start_indices(
new_backward_conv->shape().dimensions_size(), 0LL);
std::vector<int64_t> limit_indices(
new_backward_conv->shape().dimensions().begin(),
new_backward_conv->shape().dimensions().end());
std::vector<int64_t> strides(new_backward_conv->shape().dimensions_size(),
1LL);
for (size_t i = 0; i < backward_conv->window().dimensions_size(); ++i) {
int64_t padding_low = backward_conv->window().dimensions(i).padding_low();
int64_t padding_high = backward_conv->window().dimensions(i).padding_high();
int64_t dim = backward_conv_dnums.input_spatial_dimensions(i);
if (padding_low > padding_high) {
start_indices[dim] += padding_low - padding_high;
} else if (padding_low < padding_high) {
limit_indices[dim] -= padding_high - padding_low;
}
}
Shape slice_shape =
ShapeInference::InferSliceShape(new_backward_conv->shape(), start_indices,
limit_indices, strides)
.value();
CHECK(ShapeUtil::Compatible(slice_shape, backward_conv_shape))
<< ShapeUtil::HumanString(slice_shape) << " vs "
<< ShapeUtil::HumanString(backward_conv_shape);
HloInstruction* slice = computation->AddInstruction(
HloInstruction::CreateSlice(backward_conv_shape, new_backward_conv,
start_indices, limit_indices, strides));
HloInstruction* new_tuple = computation->AddInstruction(
HloInstruction::CreateTuple({slice, new_backward_conv_scratch}));
VLOG(1) << "Canonicalizing backward input conv";
VLOG(1) << "Replacing:\n " << backward_conv->ToString() << "\nwith:\n "
<< new_tuple->ToString();
TF_CHECK_OK(computation->ReplaceInstruction(backward_conv, new_tuple));
return true;
}
absl::StatusOr<bool> ConvPaddingLegalization::RunOnComputation(
HloComputation* computation) {
bool changed = false;
std::vector<HloCustomCallInstruction*> convs;
for (auto* instr : computation->instructions()) {
if (IsCustomCallToDnnConvolution(*instr)) {
convs.push_back(Cast<HloCustomCallInstruction>(instr));
}
}
for (HloCustomCallInstruction* instruction : convs) {
TF_ASSIGN_OR_RETURN(auto kind, GetCudnnConvKind(instruction));
changed |= [&] {
switch (kind) {
case CudnnConvKind::kForward:
case CudnnConvKind::kForwardActivation:
case CudnnConvKind::kForwardGraph:
return CanonicalizeForwardConvolution(instruction);
case CudnnConvKind::kBackwardInput:
return CanonicalizeBackwardInputConvolution(instruction);
case CudnnConvKind::kBackwardFilter:
return CanonicalizeBackwardFilterConvolution(instruction);
}
}();
}
return changed;
}
absl::StatusOr<bool> ConvPaddingLegalization::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool result, RunOnComputation(computation));
changed |= result;
}
return changed;
}
}
} | #include "xla/service/gpu/transforms/conv_padding_legalization.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using ConvPaddingLegalizationTest = HloTestBase;
TEST_F(ConvPaddingLegalizationTest, BackwardInputConvolve) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule convolution_module
ENTRY %convolution (operand f64[2,2,2,3]{3,2,1,0}) -> (f64[2,2,4,4]{3,2,1,0}, u8[0]) {
%operand = f64[2,2,2,3]{3,2,1,0} parameter(0)
%kernel = f64[2,3,2,3]{3,2,1,0} constant(
{
{
{
{ 0.29629629629629628, 0.30246913580246915, 0.30864197530864196 },
{ 0.31481481481481483, 0.32098765432098764, 0.3271604938271605 }
},
{
{ 0.25925925925925924, 0.26543209876543211, 0.27160493827160492 },
{ 0.27777777777777779, 0.2839506172839506, 0.29012345679012347 }
},
{
{ 0.22222222222222221, 0.22839506172839505, 0.23456790123456789 },
{ 0.24074074074074073, 0.24691358024691357, 0.25308641975308643 }
}
},
{
{
{ 0.18518518518518517, 0.19135802469135801, 0.19753086419753085 },
{ 0.20370370370370369, 0.20987654320987653, 0.21604938271604937 }
},
{
{ 0.14814814814814814, 0.15432098765432098, 0.16049382716049382 },
{ 0.16666666666666666, 0.1728395061728395, 0.17901234567901234 }
},
{
{ 0.1111111111111111, 0.11728395061728394, 0.12345679012345678 },
{ 0.12962962962962962, 0.13580246913580246, 0.1419753086419753 }
}
}
})
%reverse = f64[2,3,2,3]{3,2,1,0} reverse(%kernel), dimensions={0,1}
ROOT %custom-call = (f64[2,2,4,4]{3,2,1,0}, u8[0]{0}) custom-call(f64[2,2,2,3]{3,2,1,0} %operand, f64[2,3,2,3]{3,2,1,0} %reverse), window={size=2x3 stride=2x2 pad=0_0x0_1}, dim_labels=bf01_01io->b01f, custom_call_target="__cudnn$convBackwardInput", backend_config="{\"algorithm\":\"0\",\"tensor_ops_enabled\":false,\"conv_result_scale\":1,\"activation_mode\":\"0\",\"side_input_scale\":0}"
}
)")
.value();
ASSERT_TRUE(ConvPaddingLegalization().Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Tuple(
m::Slice(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardInputCallTarget},
m::Op(), m::Reverse(m::Constant())),
0)),
m::GetTupleElement())));
auto slice = root->operand(0);
Shape expected_slice_shape = ShapeUtil::MakeShape(F64, {2, 2, 4, 4});
EXPECT_TRUE(ShapeUtil::Equal(slice->shape(), expected_slice_shape));
auto conv = slice->operand(0);
Shape expected_conv_shape = ShapeUtil::MakeShape(F64, {2, 2, 4, 5});
EXPECT_TRUE(ShapeUtil::Equal(conv->shape(), expected_conv_shape));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/conv_padding_legalization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/conv_padding_legalization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a9d6e50b-861a-4c55-b233-20860fc7a291 | cpp | google/quiche | web_transport_http3 | quiche/quic/core/http/web_transport_http3.cc | quiche/quic/core/http/web_transport_http3_test.cc | #include "quiche/quic/core/http/web_transport_http3.h"
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/http/quic_spdy_session.h"
#include "quiche/quic/core/http/quic_spdy_stream.h"
#include "quiche/quic/core/quic_data_reader.h"
#include "quiche/quic/core/quic_data_writer.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_stream.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/common/capsule.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/web_transport/web_transport.h"
#define ENDPOINT \
(session_->perspective() == Perspective::IS_SERVER ? "Server: " : "Client: ")
namespace quic {
namespace {
class NoopWebTransportVisitor : public WebTransportVisitor {
void OnSessionReady() override {}
void OnSessionClosed(WebTransportSessionError ,
const std::string& ) override {}
void OnIncomingBidirectionalStreamAvailable() override {}
void OnIncomingUnidirectionalStreamAvailable() override {}
void OnDatagramReceived(absl::string_view ) override {}
void OnCanCreateNewOutgoingBidirectionalStream() override {}
void OnCanCreateNewOutgoingUnidirectionalStream() override {}
};
}
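// A WebTransport session is bound to its CONNECT stream: the constructor
// registers this object as the HTTP/3 datagram visitor for that stream.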
WebTransportHttp3::WebTransportHttp3(QuicSpdySession* session,
QuicSpdyStream* connect_stream,
WebTransportSessionId id)
: session_(session),
connect_stream_(connect_stream),
id_(id),
visitor_(std::make_unique<NoopWebTransportVisitor>()) {
QUICHE_DCHECK(session_->SupportsWebTransport());
QUICHE_DCHECK(IsValidWebTransportSessionId(id, session_->version()));
QUICHE_DCHECK_EQ(connect_stream_->id(), id);
connect_stream_->RegisterHttp3DatagramVisitor(this);
}
void WebTransportHttp3::AssociateStream(QuicStreamId stream_id) {
streams_.insert(stream_id);
ParsedQuicVersion version = session_->version();
if (QuicUtils::IsOutgoingStreamId(version, stream_id,
session_->perspective())) {
return;
}
if (QuicUtils::IsBidirectionalStreamId(stream_id, version)) {
incoming_bidirectional_streams_.push_back(stream_id);
visitor_->OnIncomingBidirectionalStreamAvailable();
} else {
incoming_unidirectional_streams_.push_back(stream_id);
visitor_->OnIncomingUnidirectionalStreamAvailable();
}
}
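// When the CONNECT stream goes away, every data stream still associated with
// this session is reset and the visitor is notified of the close.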
void WebTransportHttp3::OnConnectStreamClosing() {
std::vector<QuicStreamId> streams(streams_.begin(), streams_.end());
streams_.clear();
for (QuicStreamId id : streams) {
session_->ResetStream(id, QUIC_STREAM_WEBTRANSPORT_SESSION_GONE);
}
connect_stream_->UnregisterHttp3DatagramVisitor();
MaybeNotifyClose();
}
void WebTransportHttp3::CloseSession(WebTransportSessionError error_code,
absl::string_view error_message) {
if (close_sent_) {
QUIC_BUG(WebTransportHttp3 close sent twice)
<< "Calling WebTransportHttp3::CloseSession() more than once is not "
"allowed.";
return;
}
close_sent_ = true;
if (close_received_) {
QUIC_DLOG(INFO) << "Not sending CLOSE_WEBTRANSPORT_SESSION as we've "
"already sent one from peer.";
return;
}
error_code_ = error_code;
error_message_ = std::string(error_message);
QuicConnection::ScopedPacketFlusher flusher(
connect_stream_->spdy_session()->connection());
connect_stream_->WriteCapsule(
quiche::Capsule::CloseWebTransportSession(error_code, error_message),
true);
}
void WebTransportHttp3::OnCloseReceived(WebTransportSessionError error_code,
absl::string_view error_message) {
if (close_received_) {
QUIC_BUG(WebTransportHttp3 notified of close received twice)
<< "WebTransportHttp3::OnCloseReceived() may be only called once.";
}
close_received_ = true;
if (close_sent_) {
QUIC_DLOG(INFO) << "Ignoring received CLOSE_WEBTRANSPORT_SESSION as we've "
"already sent our own.";
return;
}
error_code_ = error_code;
error_message_ = std::string(error_message);
connect_stream_->WriteOrBufferBody("", true);
MaybeNotifyClose();
}
void WebTransportHttp3::OnConnectStreamFinReceived() {
if (close_received_) {
return;
}
close_received_ = true;
if (close_sent_) {
QUIC_DLOG(INFO) << "Ignoring received FIN as we've already sent our close.";
return;
}
connect_stream_->WriteOrBufferBody("", true);
MaybeNotifyClose();
}
void WebTransportHttp3::CloseSessionWithFinOnlyForTests() {
QUICHE_DCHECK(!close_sent_);
close_sent_ = true;
if (close_received_) {
return;
}
connect_stream_->WriteOrBufferBody("", true);
}
void WebTransportHttp3::HeadersReceived(
const quiche::HttpHeaderBlock& headers) {
if (session_->perspective() == Perspective::IS_CLIENT) {
int status_code;
if (!QuicSpdyStream::ParseHeaderStatusCode(headers, &status_code)) {
QUIC_DVLOG(1) << ENDPOINT
<< "Received WebTransport headers from server without "
"a valid status code, rejecting.";
rejection_reason_ = WebTransportHttp3RejectionReason::kNoStatusCode;
return;
}
bool valid_status = status_code >= 200 && status_code <= 299;
if (!valid_status) {
QUIC_DVLOG(1) << ENDPOINT
<< "Received WebTransport headers from server with "
"status code "
<< status_code << ", rejecting.";
rejection_reason_ = WebTransportHttp3RejectionReason::kWrongStatusCode;
return;
}
}
QUIC_DVLOG(1) << ENDPOINT << "WebTransport session " << id_ << " ready.";
ready_ = true;
visitor_->OnSessionReady();
session_->ProcessBufferedWebTransportStreamsForSession(this);
}
WebTransportStream* WebTransportHttp3::AcceptIncomingBidirectionalStream() {
while (!incoming_bidirectional_streams_.empty()) {
QuicStreamId id = incoming_bidirectional_streams_.front();
incoming_bidirectional_streams_.pop_front();
QuicSpdyStream* stream = session_->GetOrCreateSpdyDataStream(id);
if (stream == nullptr) {
continue;
}
return stream->web_transport_stream();
}
return nullptr;
}
WebTransportStream* WebTransportHttp3::AcceptIncomingUnidirectionalStream() {
while (!incoming_unidirectional_streams_.empty()) {
QuicStreamId id = incoming_unidirectional_streams_.front();
incoming_unidirectional_streams_.pop_front();
QuicStream* stream = session_->GetOrCreateStream(id);
if (stream == nullptr) {
continue;
}
return static_cast<WebTransportHttp3UnidirectionalStream*>(stream)
->interface();
}
return nullptr;
}
bool WebTransportHttp3::CanOpenNextOutgoingBidirectionalStream() {
return session_->CanOpenOutgoingBidirectionalWebTransportStream(id_);
}
bool WebTransportHttp3::CanOpenNextOutgoingUnidirectionalStream() {
return session_->CanOpenOutgoingUnidirectionalWebTransportStream(id_);
}
WebTransportStream* WebTransportHttp3::OpenOutgoingBidirectionalStream() {
QuicSpdyStream* stream =
session_->CreateOutgoingBidirectionalWebTransportStream(this);
if (stream == nullptr) {
return nullptr;
}
return stream->web_transport_stream();
}
WebTransportStream* WebTransportHttp3::OpenOutgoingUnidirectionalStream() {
WebTransportHttp3UnidirectionalStream* stream =
session_->CreateOutgoingUnidirectionalWebTransportStream(this);
if (stream == nullptr) {
return nullptr;
}
return stream->interface();
}
webtransport::Stream* WebTransportHttp3::GetStreamById(
webtransport::StreamId id) {
if (!streams_.contains(id)) {
return nullptr;
}
QuicStream* stream = session_->GetActiveStream(id);
const bool bidi = QuicUtils::IsBidirectionalStreamId(
id, ParsedQuicVersion::RFCv1());
if (bidi) {
return static_cast<QuicSpdyStream*>(stream)->web_transport_stream();
} else {
return static_cast<WebTransportHttp3UnidirectionalStream*>(stream)
->interface();
}
}
webtransport::DatagramStatus WebTransportHttp3::SendOrQueueDatagram(
absl::string_view datagram) {
return MessageStatusToWebTransportStatus(
connect_stream_->SendHttp3Datagram(datagram));
}
QuicByteCount WebTransportHttp3::GetMaxDatagramSize() const {
return connect_stream_->GetMaxDatagramSize();
}
void WebTransportHttp3::SetDatagramMaxTimeInQueue(
absl::Duration max_time_in_queue) {
connect_stream_->SetMaxDatagramTimeInQueue(QuicTimeDelta(max_time_in_queue));
}
void WebTransportHttp3::NotifySessionDraining() {
if (!drain_sent_) {
connect_stream_->WriteCapsule(
quiche::Capsule(quiche::DrainWebTransportSessionCapsule()));
drain_sent_ = true;
}
}
void WebTransportHttp3::OnHttp3Datagram(QuicStreamId stream_id,
absl::string_view payload) {
QUICHE_DCHECK_EQ(stream_id, connect_stream_->id());
visitor_->OnDatagramReceived(payload);
}
void WebTransportHttp3::MaybeNotifyClose() {
if (close_notified_) {
return;
}
close_notified_ = true;
visitor_->OnSessionClosed(error_code_, error_message_);
}
void WebTransportHttp3::OnGoAwayReceived() {
if (drain_callback_ != nullptr) {
std::move(drain_callback_)();
drain_callback_ = nullptr;
}
}
void WebTransportHttp3::OnDrainSessionReceived() { OnGoAwayReceived(); }
WebTransportHttp3UnidirectionalStream::WebTransportHttp3UnidirectionalStream(
PendingStream* pending, QuicSpdySession* session)
: QuicStream(pending, session, false),
session_(session),
adapter_(session, this, sequencer(), std::nullopt),
needs_to_send_preamble_(false) {
sequencer()->set_level_triggered(true);
}
WebTransportHttp3UnidirectionalStream::WebTransportHttp3UnidirectionalStream(
QuicStreamId id, QuicSpdySession* session, WebTransportSessionId session_id)
: QuicStream(id, session, false, WRITE_UNIDIRECTIONAL),
session_(session),
adapter_(session, this, sequencer(), session_id),
session_id_(session_id),
needs_to_send_preamble_(true) {}
void WebTransportHttp3UnidirectionalStream::WritePreamble() {
if (!needs_to_send_preamble_ || !session_id_.has_value()) {
QUIC_BUG(WebTransportHttp3UnidirectionalStream duplicate preamble)
<< ENDPOINT << "Sending preamble on stream ID " << id()
<< " at the wrong time.";
OnUnrecoverableError(QUIC_INTERNAL_ERROR,
"Attempting to send a WebTransport unidirectional "
"stream preamble at the wrong time.");
return;
}
QuicConnection::ScopedPacketFlusher flusher(session_->connection());
char buffer[sizeof(uint64_t) * 2];
QuicDataWriter writer(sizeof(buffer), buffer);
bool success = true;
success = success && writer.WriteVarInt62(kWebTransportUnidirectionalStream);
success = success && writer.WriteVarInt62(*session_id_);
QUICHE_DCHECK(success);
WriteOrBufferData(absl::string_view(buffer, writer.length()), false,
nullptr);
QUIC_DVLOG(1) << ENDPOINT << "Sent stream type and session ID ("
<< *session_id_ << ") on WebTransport stream " << id();
needs_to_send_preamble_ = false;
}
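// Parses the session ID varint that prefixes an incoming WebTransport
// unidirectional stream. Returns false until the full varint has arrived; a
// stream that ends before then is drained and left unassociated.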
bool WebTransportHttp3UnidirectionalStream::ReadSessionId() {
iovec iov;
if (!sequencer()->GetReadableRegion(&iov)) {
return false;
}
QuicDataReader reader(static_cast<const char*>(iov.iov_base), iov.iov_len);
WebTransportSessionId session_id;
uint8_t session_id_length = reader.PeekVarInt62Length();
if (!reader.ReadVarInt62(&session_id)) {
if (sequencer()->IsAllDataAvailable()) {
QUIC_DLOG(WARNING)
<< ENDPOINT << "Failed to associate WebTransport stream " << id()
<< " with a session because the stream ended prematurely.";
sequencer()->MarkConsumed(sequencer()->NumBytesBuffered());
}
return false;
}
sequencer()->MarkConsumed(session_id_length);
session_id_ = session_id;
adapter_.SetSessionId(session_id);
session_->AssociateIncomingWebTransportStreamWithSession(session_id, id());
return true;
}
void WebTransportHttp3UnidirectionalStream::OnDataAvailable() {
if (!session_id_.has_value()) {
if (!ReadSessionId()) {
return;
}
}
adapter_.OnDataAvailable();
}
void WebTransportHttp3UnidirectionalStream::OnCanWriteNewData() {
adapter_.OnCanWriteNewData();
}
void WebTransportHttp3UnidirectionalStream::OnClose() {
QuicStream::OnClose();
if (!session_id_.has_value()) {
return;
}
WebTransportHttp3* session = session_->GetWebTransportSession(*session_id_);
if (session == nullptr) {
QUIC_DLOG(WARNING) << ENDPOINT << "WebTransport stream " << id()
<< " attempted to notify parent session " << *session_id_
<< ", but the session could not be found.";
return;
}
session->OnStreamClosed(id());
}
void WebTransportHttp3UnidirectionalStream::OnStreamReset(
const QuicRstStreamFrame& frame) {
if (adapter_.visitor() != nullptr) {
adapter_.visitor()->OnResetStreamReceived(
Http3ErrorToWebTransportOrDefault(frame.ietf_error_code));
}
QuicStream::OnStreamReset(frame);
}
bool WebTransportHttp3UnidirectionalStream::OnStopSending(
QuicResetStreamError error) {
if (adapter_.visitor() != nullptr) {
adapter_.visitor()->OnStopSendingReceived(
Http3ErrorToWebTransportOrDefault(error.ietf_application_code()));
}
return QuicStream::OnStopSending(error);
}
void WebTransportHttp3UnidirectionalStream::OnWriteSideInDataRecvdState() {
if (adapter_.visitor() != nullptr) {
adapter_.visitor()->OnWriteSideInDataRecvdState();
}
QuicStream::OnWriteSideInDataRecvdState();
}
namespace {
constexpr uint64_t kWebTransportMappedErrorCodeFirst = 0x52e4a40fa8db;
constexpr uint64_t kWebTransportMappedErrorCodeLast = 0x52e5ac983162;
constexpr WebTransportStreamError kDefaultWebTransportError = 0;
}
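// Maps an HTTP/3 error code back into the 32-bit WebTransport error space.
// Codes outside the mapped range, as well as the skipped codepoints of the
// form 0x21 + 0x1f * N (reserved by HTTP/3), have no WebTransport equivalent.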
std::optional<WebTransportStreamError> Http3ErrorToWebTransport(
uint64_t http3_error_code) {
if (http3_error_code < kWebTransportMappedErrorCodeFirst ||
http3_error_code > kWebTransportMappedErrorCodeLast) {
return std::nullopt;
}
if ((http3_error_code - 0x21) % 0x1f == 0) {
return std::nullopt;
}
uint64_t shifted = http3_error_code - kWebTransportMappedErrorCodeFirst;
uint64_t result = shifted - shifted / 0x1f;
QUICHE_DCHECK_LE(result,
std::numeric_limits<webtransport::StreamErrorCode>::max());
return static_cast<WebTransportStreamError>(result);
}
WebTransportStreamError Http3ErrorToWebTransportOrDefault(
uint64_t http3_error_code) {
std::optional<WebTransportStreamError> result =
Http3ErrorToWebTransport(http3_error_code);
return result.has_value() ? *result : kDefaultWebTransportError;
}
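// Inverse mapping: every block of 0x1e consecutive WebTransport codes advances
// by one extra HTTP/3 codepoint so that the reserved values are skipped.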
uint64_t WebTransportErrorToHttp3(
WebTransportStreamError webtransport_error_code) {
return kWebTransportMappedErrorCodeFirst + webtransport_error_code +
webtransport_error_code / 0x1e;
}
} | #include "quiche/quic/core/http/web_transport_http3.h"
#include <cstdint>
#include <limits>
#include <optional>
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace {
using ::testing::Optional;
TEST(WebTransportHttp3Test, ErrorCodesToHttp3) {
EXPECT_EQ(0x52e4a40fa8dbu, WebTransportErrorToHttp3(0x00));
EXPECT_EQ(0x52e4a40fa9e2u, WebTransportErrorToHttp3(0xff));
EXPECT_EQ(0x52e5ac983162u, WebTransportErrorToHttp3(0xffffffff));
EXPECT_EQ(0x52e4a40fa8f7u, WebTransportErrorToHttp3(0x1c));
EXPECT_EQ(0x52e4a40fa8f8u, WebTransportErrorToHttp3(0x1d));
EXPECT_EQ(0x52e4a40fa8fau, WebTransportErrorToHttp3(0x1e));
}
TEST(WebTransportHttp3Test, ErrorCodesToWebTransport) {
EXPECT_THAT(Http3ErrorToWebTransport(0x52e4a40fa8db), Optional(0x00));
EXPECT_THAT(Http3ErrorToWebTransport(0x52e4a40fa9e2), Optional(0xff));
EXPECT_THAT(Http3ErrorToWebTransport(0x52e5ac983162u), Optional(0xffffffff));
EXPECT_THAT(Http3ErrorToWebTransport(0x52e4a40fa8f7), Optional(0x1cu));
EXPECT_THAT(Http3ErrorToWebTransport(0x52e4a40fa8f8), Optional(0x1du));
EXPECT_THAT(Http3ErrorToWebTransport(0x52e4a40fa8f9), std::nullopt);
EXPECT_THAT(Http3ErrorToWebTransport(0x52e4a40fa8fa), Optional(0x1eu));
EXPECT_EQ(Http3ErrorToWebTransport(0), std::nullopt);
EXPECT_EQ(Http3ErrorToWebTransport(std::numeric_limits<uint64_t>::max()),
std::nullopt);
}
TEST(WebTransportHttp3Test, ErrorCodeRoundTrip) {
for (int error = 0; error <= 65536; error++) {
uint64_t http_error = WebTransportErrorToHttp3(error);
std::optional<WebTransportStreamError> mapped_back =
quic::Http3ErrorToWebTransport(http_error);
ASSERT_THAT(mapped_back, Optional(error));
}
for (int64_t error = 0; error < std::numeric_limits<uint32_t>::max();
error += 65537) {
uint64_t http_error = WebTransportErrorToHttp3(error);
std::optional<WebTransportStreamError> mapped_back =
quic::Http3ErrorToWebTransport(http_error);
ASSERT_THAT(mapped_back, Optional(error));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/web_transport_http3.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/web_transport_http3_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
50beb168-8ca5-4457-99cd-a70833c5bb18 | cpp | tensorflow/tensorflow | device_compilation_cluster_signature | tensorflow/compiler/jit/device_compilation_cluster_signature.cc | tensorflow/compiler/jit/device_compilation_cluster_signature_test.cc | #include "tensorflow/compiler/jit/device_compilation_cluster_signature.h"
#include <string>
#include <utility>
#include <variant>
namespace tensorflow {
namespace {
using Signature = DeviceCompilationClusterSignature;
using TensorTypeAndShape = Signature::TensorTypeAndShape;
struct SignatureHumanStringAppender {
explicit SignatureHumanStringAppender(std::string* dest) : dest(dest) {}
std::string* dest;
void operator()(const Tensor& arg) {
absl::StrAppend(dest, "; ", arg.DebugString());
}
void operator()(const TensorTypeAndShape& arg) {
absl::StrAppend(dest, ",", DataTypeString(arg.first));
absl::StrAppend(dest, " [", absl::StrJoin(arg.second, ","), "]");
}
};
struct SignatureNotEqual {
bool operator()(const Tensor& arg, const Tensor& other) {
return arg.dtype() != other.dtype() || arg.shape() != other.shape() ||
arg.tensor_data() != other.tensor_data();
}
bool operator()(const TensorTypeAndShape& arg,
const TensorTypeAndShape& other) {
return arg.first != other.first || arg.second != other.second;
}
bool operator()(const Tensor& arg, const TensorTypeAndShape& other) {
return true;
}
bool operator()(const TensorTypeAndShape& arg, const Tensor& other) {
return true;
}
};
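// Folds each signature argument into a running Hash64: constant tensors hash
// their dtype, raw bytes, and dimensions; parameters hash dtype and shape.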
struct SignatureHashCombiner {
explicit SignatureHashCombiner(const uint64 h) : h(h) {}
uint64 h;
uint64 operator()(const Tensor& arg) {
h = Hash64Combine(h, std::hash<int>()(static_cast<int>(arg.dtype())));
h = Hash64Combine(
h, Hash64(arg.tensor_data().data(), arg.tensor_data().size()));
for (int dim = 0; dim < arg.dims(); ++dim) {
h = Hash64Combine(h, std::hash<int>()(arg.dim_size(dim)));
}
return h;
}
uint64 operator()(const TensorTypeAndShape& arg) {
h = Hash64Combine(h, std::hash<int>()(static_cast<int>(arg.first)));
h = Hash64Combine(h, std::hash<int>()(arg.second.size()));
for (int dim : arg.second) {
h = Hash64Combine(h, std::hash<int>()(dim));
}
return h;
}
};
}
std::string Signature::HumanString() const {
std::string result = name;
for (const auto& arg : args) {
std::visit(SignatureHumanStringAppender(&result), arg);
}
return result;
}
bool Signature::operator==(const Signature& other) const {
if (name != other.name) return false;
if (args.size() != other.args.size()) return false;
for (int i = 0, end = args.size(); i < end; ++i) {
if (std::visit(SignatureNotEqual(), args[i], other.args[i])) {
return false;
}
}
return true;
}
uint64 Signature::Hash::operator()(const Signature& signature) const {
uint64 h = std::hash<string>()(signature.name);
for (const auto& arg : signature.args) {
h = std::visit(SignatureHashCombiner(h), arg);
}
return h;
}
absl::StatusOr<Signature> Signature::Build(
const NameAttrList& function,
absl::Span<const XlaCompiler::Argument> args) {
Signature signature;
signature.name = Canonicalize(function.name(), AttrSlice(&function.attr()));
for (const XlaCompiler::Argument& arg : args) {
switch (arg.kind) {
case XlaCompiler::Argument::kConstant:
case XlaCompiler::Argument::kConstantResource:
signature.args.push_back(arg.constant_value);
break;
case XlaCompiler::Argument::kParameter:
case XlaCompiler::Argument::kResource:
signature.args.push_back(
TensorTypeAndShape(arg.type, arg.DimensionSizesAsInlinedVector()));
break;
default:
        return errors::InvalidArgument(
            "Unhandled argument kind in DeviceCompilationClusterSignature: ",
            arg.HumanString());
}
}
return std::move(signature);
}
} | #include "tensorflow/compiler/jit/device_compilation_cluster_signature.h"
#include <utility>
#include <vector>
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "xla/client/client_library.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
using SignatureHash = DeviceCompilationClusterSignature::Hash;
TEST(DeviceCompilationClusterSignatureTest, SignatureEquality) {
NameAttrList fn;
fn.set_name("afunction");
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kConstant;
args[0].type = DT_INT32;
args[0].shape = TensorShape({4, 0});
args[0].constant_value = Tensor(DT_INT32, {4, 0});
TF_ASSERT_OK_AND_ASSIGN(DeviceCompilationClusterSignature s1,
DeviceCompilationClusterSignature::Build(fn, args));
args[0].type = DT_FLOAT;
args[0].constant_value = Tensor(DT_FLOAT, {4, 0});
TF_ASSERT_OK_AND_ASSIGN(DeviceCompilationClusterSignature s2,
DeviceCompilationClusterSignature::Build(fn, args));
args[0].shape = TensorShape({0, 4});
args[0].constant_value = Tensor(DT_FLOAT, {0, 4});
TF_ASSERT_OK_AND_ASSIGN(DeviceCompilationClusterSignature s3,
DeviceCompilationClusterSignature::Build(fn, args));
std::vector<DeviceCompilationClusterSignature> signatures = {s1, s2, s3};
for (int i = 0; i < signatures.size(); ++i) {
for (int j = 0; j < signatures.size(); ++j) {
EXPECT_EQ(i == j, signatures[i] == signatures[j])
<< "s1: " << signatures[i].HumanString() << "\n"
<< "s2: " << signatures[j].HumanString();
EXPECT_EQ(i == j,
signatures[i].HumanString() == signatures[j].HumanString())
<< "s1: " << signatures[i].HumanString() << "\n"
<< "s2: " << signatures[j].HumanString();
EXPECT_EQ(i == j, SignatureHash()(signatures[i]) ==
SignatureHash()(signatures[j]))
<< "s1: " << signatures[i].HumanString() << "\n"
<< "s1_hash: " << SignatureHash()(signatures[i]) << "\n"
<< "s2: " << signatures[j].HumanString() << "\n"
<< "s2_hash: " << SignatureHash()(signatures[j]);
}
}
}
TEST(DeviceCompilationClusterSignatureTest, SignatureUniqueness) {
NameAttrList fn;
fn.set_name("afunction");
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kConstant;
args[0].type = DT_INT32;
args[0].constant_value = Tensor(DT_INT32, {4, 0});
args[1].kind = XlaCompiler::Argument::kParameter;
args[1].type = DT_INT32;
args[1].shape = TensorShape({4, 0});
TF_ASSERT_OK_AND_ASSIGN(DeviceCompilationClusterSignature s1,
DeviceCompilationClusterSignature::Build(fn, args));
using std::swap;
swap(args[0], args[1]);
TF_ASSERT_OK_AND_ASSIGN(DeviceCompilationClusterSignature s2,
DeviceCompilationClusterSignature::Build(fn, args));
EXPECT_NE(s1.HumanString(), s2.HumanString());
EXPECT_NE(SignatureHash()(s1), SignatureHash()(s2));
EXPECT_FALSE(s1 == s2);
}
void BM_BuildSignature(::testing::benchmark::State& state) {
const int n_args = state.range(0);
NameAttrList fn;
fn.set_name("afunction");
for (int i = 0; i < n_args; i++) {
(*fn.mutable_attr())[absl::StrCat("T", i)].set_type(DT_FLOAT);
}
std::vector<XlaCompiler::Argument> args(n_args);
for (int i = 0; i < n_args; i++) {
args[i].kind = (((i % 3) == 0) ? XlaCompiler::Argument::kConstant
: XlaCompiler::Argument::kParameter);
args[i].type = DT_INT32;
args[i].shape = TensorShape({4, 0});
args[i].constant_value = Tensor(DT_INT32, {4, 0});
}
for (auto i : state) {
auto s = DeviceCompilationClusterSignature::Build(fn, args);
CHECK(s.ok());
DeviceCompilationClusterSignature sig = std::move(s.value());
}
}
BENCHMARK(BM_BuildSignature)->Arg(0)->Arg(1)->Arg(2)->Arg(5)->Arg(10);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compilation_cluster_signature.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compilation_cluster_signature_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c6163921-3eb0-4635-9477-0c8012ec8a36 | cpp | tensorflow/tensorflow | shape_refiner | tensorflow/core/common_runtime/shape_refiner.cc | tensorflow/core/common_runtime/shape_refiner_test.cc | #include "tensorflow/core/common_runtime/shape_refiner.h"
#include <deque>
#include <limits>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/common_runtime/eval_const_tensor.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeAndType;
using shape_inference::ShapeHandle;
ShapeRefiner::ShapeRefiner(int graph_def_version,
const OpRegistryInterface* ops)
: graph_def_version_(graph_def_version),
ops_registry_(ops),
graph_runner_(Env::Default()) {}
ShapeRefiner::ShapeRefiner(const VersionDef& versions,
const OpRegistryInterface* ops)
: ShapeRefiner(versions.producer(), ops) {}
ShapeRefiner::~ShapeRefiner() {
const_tensor_map_.clear();
}
namespace {
constexpr char kArgOp[] = "_Arg";
constexpr char kRetvalOp[] = "_Retval";
}
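// Runs shape inference for a single node of an instantiated function body.
// _Arg nodes pull their output shape (and any resource handle shapes/types)
// from the outer context's inputs; _Retval nodes copy their input shape back
// into the outer context's outputs.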
Status ShapeRefiner::InferShapesForFunctionSubNode(
const Node* node, InferenceContext* outer_context) {
TF_RETURN_IF_ERROR(AddNodeInternal(node, outer_context));
InferenceContext* node_context = CHECK_NOTNULL(GetContext(node));
if (StringPiece(node->type_string()) == kArgOp) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node->def()), "index", &index));
if (index < 0 || outer_context->num_inputs() <= index) {
return errors::Internal(
"Function instantiation included invalid input index: ", index,
" not in [0, ", outer_context->num_inputs(), ").");
}
if (outer_context->input(index).SameHandle(ShapeHandle())) {
VLOG(1) << "Function instantiation has undefined input shape at "
<< "index: " << index << " in the outer inference context.";
node_context->set_output(0, node_context->UnknownShape());
} else {
node_context->set_output(0, outer_context->input(index));
}
auto* resource = outer_context->input_handle_shapes_and_types(index);
if (resource) {
node_context->set_output_handle_shapes_and_types(0, *resource);
}
} else if (StringPiece(node->type_string()) == kRetvalOp) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node->def()), "index", &index));
if (index < 0 || outer_context->num_outputs() <= index) {
return errors::Internal(
"Function instantiation included invalid output index: ", index,
" not in [0, ", outer_context->num_outputs(), ").");
}
ShapeHandle handle;
TensorShapeProto proto;
node_context->ShapeHandleToProto(node_context->input(0), &proto);
TF_RETURN_IF_ERROR(outer_context->MakeShapeFromShapeProto(proto, &handle));
outer_context->set_output(index, handle);
const std::vector<ShapeAndType>* resource =
node_context->input_handle_shapes_and_types(0);
if (resource) {
std::vector<ShapeAndType> copied_shapes_and_types;
for (auto& shape_and_type : *resource) {
ShapeHandle handle;
TensorShapeProto proto;
node_context->ShapeHandleToProto(shape_and_type.shape, &proto);
TF_RETURN_IF_ERROR(
outer_context->MakeShapeFromShapeProto(proto, &handle));
copied_shapes_and_types.push_back(
ShapeAndType(handle, shape_and_type.dtype, shape_and_type.type));
}
outer_context->set_output_handle_shapes_and_types(
index, copied_shapes_and_types);
}
}
return absl::OkStatus();
}
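// Instantiates (and caches) the function body as a Graph, then walks it with
// ReverseDFS, running shape inference for each node against the caller's
// outer InferenceContext. Contexts created for body nodes are discarded once
// inference finishes.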
Status ShapeRefiner::InferShapesForFunction(const FunctionDef* function_def,
AttrSlice attributes,
InferenceContext* outer_context) {
const Graph* graph;
const string& fname = function_def->signature().name();
auto it = functions_.find(fname);
if (it != functions_.end()) {
graph = it->second.get();
} else {
InstantiationResult result;
TF_RETURN_IF_ERROR(InstantiateFunction(
*function_def, attributes,
[this](const string& op, const OpDef** sig) {
return this->function_library_->LookUpOpDef(op, sig);
},
&result));
Graph* new_graph = new Graph(function_library_);
GraphConstructorOptions options;
options.allow_internal_ops = true;
TF_RETURN_IF_ERROR(
ConvertNodeDefsToGraph(options, result.nodes, new_graph));
functions_[fname].reset(new_graph);
graph = new_graph;
}
absl::flat_hash_set<const Node*> function_nodes;
Status inference_status = absl::OkStatus();
{
auto node_shape_inference_lambda = [this, &outer_context, &function_nodes,
&inference_status](const Node* node) {
if (!inference_status.ok()) return;
inference_status = InferShapesForFunctionSubNode(node, outer_context);
function_nodes.insert(node);
};
ReverseDFS(*graph, {}, node_shape_inference_lambda);
}
for (const Node* node : function_nodes) {
node_to_context_.erase(node);
}
return inference_status;
}
Status ShapeRefiner::AddNode(const Node* node) {
return AddNodeInternal(node, nullptr);
}
Status ShapeRefiner::AddNodeInternal(
const Node* node, shape_inference::InferenceContext* outer_context) {
std::unique_ptr<InferenceContext> ic(new InferenceContext(
graph_def_version_, node->def(), node->op_def(),
std::vector<ShapeHandle>(node->num_inputs()), {}, {}, {}));
TF_RETURN_IF_ERROR(ic->construction_status());
for (const Edge* e : node->in_edges()) {
if (e->IsControlEdge()) continue;
if (e->dst_input() < 0) {
return tensorflow::errors::Internal(
"Index ", e->dst_input(), " is negative but not a control edge.");
}
const Node* input = e->src();
auto it = node_to_context_.find(input);
if (it == node_to_context_.end()) {
ic->SetInput(e->dst_input(), ic->UnknownShape());
continue;
}
InferenceContext* input_ic = it->second.get();
ic->SetInput(e->dst_input(), input_ic->output(e->src_output()));
const auto* in_v =
input_ic->output_handle_shapes_and_types(e->src_output());
if (in_v != nullptr) {
DataType input_type = e->src()->output_type(e->src_output());
DCHECK(input_type == DT_RESOURCE || input_type == DT_VARIANT);
ic->set_input_handle_shapes_and_types(e->dst_input(),
std::vector<ShapeAndType>(*in_v));
}
}
const OpRegistrationData* op_reg_data;
TF_RETURN_IF_ERROR(ops_registry_->LookUp(node->type_string(), &op_reg_data));
if (op_reg_data->shape_inference_fn == nullptr &&
require_shape_inference_fns_) {
return errors::InvalidArgument(
"No shape inference function exists for op '", node->type_string(),
"', did you forget to define it?");
}
TF_RETURN_IF_ERROR(RunShapeFn(node, op_reg_data, ic.get(), outer_context));
node_to_context_[node].swap(ic);
return absl::OkStatus();
}
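// Overrides the inferred shape of `node`'s output `output_port`; the new
// shape must merge cleanly with whatever was already inferred.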
Status ShapeRefiner::SetShape(const Node* node, int output_port,
ShapeHandle shape) {
auto c = GetContext(node);
if (c == nullptr) {
return errors::Internal("Could not find context for ", node->name());
}
if (output_port < 0 || output_port >= node->num_outputs()) {
return errors::InvalidArgument(
"output_port '", output_port, "' is out of range, ", "node '",
node->name(), "' has ", node->num_outputs(), " outputs");
}
if (node->num_outputs() > c->num_outputs()) {
TF_RETURN_IF_ERROR(c->ExpandOutputs(node->num_outputs()));
}
ShapeHandle existing_shape = c->output(output_port);
TF_RETURN_IF_ERROR(c->Merge(existing_shape, shape, &shape));
c->set_output(output_port, shape);
return absl::OkStatus();
}
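// Re-propagates the producers' output shapes (and resource handle data) into
// `node`, merging by default or relaxing when `relax` is true, and reruns the
// shape function if anything changed. `*refined` reports whether it did.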
Status ShapeRefiner::UpdateNode(const Node* node, bool relax, bool* refined) {
auto it = node_to_context_.find(node);
if (it == node_to_context_.end()) {
*refined = true;
return AddNode(node);
}
InferenceContext* node_context = it->second.get();
TF_RETURN_IF_ERROR(node_context->construction_status());
for (const Edge* e : node->in_edges()) {
if (e->IsControlEdge()) continue;
int dst_input = e->dst_input();
int src_output = e->src_output();
Node* input = e->src();
auto iter = node_to_context_.find(input);
if (iter == node_to_context_.end()) {
return errors::FailedPrecondition(
"Input ", dst_input, " ('", input->name(), "') for '", node->name(),
"' was not previously added to ShapeRefiner.");
}
InferenceContext* c = iter->second.get();
DCHECK_GE(dst_input, 0);
ShapeHandle existing_input = node_context->input(dst_input);
if (!relax) {
if (node_context->MergeInput(dst_input, c->output(src_output))) {
if (!SameDefinedShape(node_context, node_context->input(dst_input),
existing_input)) {
*refined = true;
}
}
} else {
if (node_context->RelaxInput(dst_input, c->output(src_output))) {
if (!SameDefinedShape(node_context, node_context->input(dst_input),
existing_input)) {
*refined = true;
}
}
}
if (node_context->requested_input_tensor_as_partial_shape(dst_input)) {
*refined = true;
}
if (e->src()->output_type(src_output) == DT_RESOURCE) {
auto* outputs = c->output_handle_shapes_and_types(src_output);
if (!outputs) continue;
if (!relax &&
node_context->MergeInputHandleShapesAndTypes(dst_input, *outputs)) {
*refined = true;
} else if (relax) {
std::vector<ShapeAndType> existing_inputs;
const std::vector<ShapeAndType>* inputs =
node_context->input_handle_shapes_and_types(dst_input);
if (inputs) {
existing_inputs = *inputs;
}
if (node_context->RelaxInputHandleShapesAndMergeTypes(dst_input,
*outputs)) {
if (IsUpdatedShapesOrTypes(
node_context, existing_inputs,
*node_context->input_handle_shapes_and_types(dst_input))) {
*refined = true;
}
}
}
}
}
if (!*refined) {
return absl::OkStatus();
}
const OpRegistrationData* op_reg_data;
TF_RETURN_IF_ERROR(ops_registry_->LookUp(node->type_string(), &op_reg_data));
if (op_reg_data->shape_inference_fn == nullptr &&
require_shape_inference_fns_) {
return errors::InvalidArgument(
"No shape inference function exists for op '", node->type_string(),
"', did you forget to define it?");
}
if (!op_reg_data->shape_inference_fn) {
return absl::OkStatus();
}
return RunShapeFn(node, op_reg_data, node_context);
}
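// Tries to statically evaluate the tensor feeding input `dst_idx` of `node`,
// consulting `outer_context` for _Arg nodes and caching results no larger
// than kMaxTensorSize. `*evaluated` reports whether a value was produced.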
Status ShapeRefiner::EvaluateConstantTensorForEdge(
const Node* node, int dst_idx, bool* evaluated, Tensor* result,
InferenceContext* outer_context) {
const Edge* input_edge;
TF_RETURN_IF_ERROR(node->input_edge(dst_idx, &input_edge));
const Node& src = *input_edge->src();
const int src_output = input_edge->src_output();
auto lookup = [&](const Node& node, int index) -> std::optional<Tensor> {
if (node.IsArg() && outer_context != nullptr) {
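      // This inner "index" (read from the _Arg node's "index" attr)
      // intentionally shadows the lambda's output-index parameter.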
int index;
if (GetNodeAttr(node.def(), "index", &index).ok() && 0 <= index &&
index < outer_context->num_inputs()) {
const auto* tensor = outer_context->input_tensor(index);
outer_context->request_input_tensor(index);
if (tensor != nullptr) {
return *tensor;
}
}
}
auto it = const_tensor_map_.find({node.id(), index});
if (it != const_tensor_map_.end()) {
return it->second;
}
return std::optional<Tensor>();
};
std::optional<EvaluateConstantTensorRunner> runner;
if (!disable_constant_propagation_) {
runner = EvaluateConstantTensorRunner{
ops_registry_,
graph_def_version_,
&graph_runner_,
};
}
TF_ASSIGN_OR_RETURN(auto tensor, EvaluateConstantTensor(
src, src_output, *this, lookup, runner));
*evaluated = tensor.has_value();
if (tensor.has_value()) {
if (tensor->TotalBytes() <= kMaxTensorSize) {
const_tensor_map_.emplace(std::make_pair(src.id(), src_output), *tensor);
}
*result = *std::move(tensor);
}
return absl::OkStatus();
}
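// Like EvaluateConstantTensorForEdge, but additionally requires the value to
// be an int32/int64 scalar, which is returned through `*result`.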
Status ShapeRefiner::EvaluateConstantIntScalarEdge(
const Node* node, int dst_idx, bool* evaluated, int64_t* result,
shape_inference::InferenceContext* outer_context) {
Tensor scalar;
TF_RETURN_IF_ERROR(EvaluateConstantTensorForEdge(node, dst_idx, evaluated,
&scalar, outer_context));
if (*evaluated) {
if (scalar.NumElements() != 1) {
return errors::InvalidArgument(
"EvaluateConstantIntScalarEdge called on non-scalar edge: ",
scalar.NumElements());
}
if (scalar.dtype() == DT_INT32) {
*result = scalar.scalar<int32>()();
} else {
if (scalar.dtype() != DT_INT64) {
return errors::InvalidArgument(
"EvaluateConstantIntScalarEdge called on non-integer edge: ",
scalar.dtype());
}
*result = scalar.scalar<int64_t>()();
}
}
return absl::OkStatus();
}
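// Interprets the tensor feeding input `dst_idx` of `node` as a (possibly
// partial) shape, special-casing Shape, ShapeN, Pack, Concat/ConcatV2, Cast,
// StridedSlice and VariableShape so that unknown dimensions are preserved
// instead of forcing full constant evaluation.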
Status ShapeRefiner::ConstantPartialShape(
InferenceContext* target_context, const Node* node, int dst_idx,
ShapeHandle* result, shape_inference::InferenceContext* outer_context) {
const Edge* input_edge;
TF_RETURN_IF_ERROR(node->input_edge(dst_idx, &input_edge));
InferenceContext* src_context = GetContext(input_edge->src());
if (src_context == nullptr) return errors::Internal("Missing src context");
ShapeHandle src_shape = src_context->output(input_edge->src_output());
if (src_context->Value(src_context->Rank(src_shape)) == 0) {
Tensor t;
bool evaluated = false;
TF_RETURN_IF_ERROR(EvaluateConstantTensorForEdge(node, dst_idx, &evaluated,
&t, outer_context));
if (!evaluated) {
return errors::InvalidArgument(
"Received a shape scalar with unknown static value. A static value "
"of '-1' is required to represent an unknown shape.");
}
if (t.dims() == 0) {
if (t.dtype() == DT_INT32 && t.scalar<int32>()() == -1) {
*result = target_context->UnknownShape();
return absl::OkStatus();
} else if (t.dtype() == DT_INT64 && t.scalar<int64_t>()() == -1) {
*result = target_context->UnknownShape();
return absl::OkStatus();
}
}
return errors::InvalidArgument(
"Received an invalid shape scalar with a static value that is not "
"'-1': ",
t.DebugString());
}
TF_RETURN_IF_ERROR(src_context->WithRank(src_shape, 1, &src_shape));
const string& src_op = input_edge->src()->type_string();
if (src_context->Value(src_context->Dim(src_shape, 0)) == 0) {
*result = target_context->Scalar();
} else if (src_op == "Cast") {
Tensor t;
bool evaluated = false;
if (EvaluateConstantTensorForEdge(node, dst_idx, &evaluated, &t,
outer_context)
.ok()) {
if (evaluated &&
target_context->MakeShapeFromTensor(&t, src_shape, result).ok()) {
return absl::OkStatus();
}
}
ShapeHandle pre_cast_shape;
if (!ConstantPartialShape(target_context, input_edge->src(), 0,
&pre_cast_shape, outer_context)
.ok()) {
TF_RETURN_IF_ERROR(
target_context->MakeShapeFromTensor(nullptr, src_shape, result));
}
if (!target_context->RankKnown(pre_cast_shape)) {
*result = target_context->UnknownShape();
return absl::OkStatus();
}
auto* dest_type = input_edge->src()->attrs().Find("DstT");
if (dest_type == nullptr || dest_type->value_case() != AttrValue::kType ||
(dest_type->type() != DT_INT32 && dest_type->type() != DT_INT64)) {
*result = target_context->MakeShape(std::vector<DimensionHandle>(
target_context->Rank(pre_cast_shape), target_context->UnknownDim()));
return absl::OkStatus();
}
*result = pre_cast_shape;
} else if (src_op == "Shape") {
*result = src_context->input(0);
} else if (src_op == "ShapeN") {
*result = src_context->input(input_edge->src_output());
} else if (src_op == "Pack") {
std::vector<DimensionHandle> dims;
for (int i = 0; i < src_context->num_inputs(); ++i) {
int64_t size;
bool evaluated;
TF_RETURN_IF_ERROR(EvaluateConstantIntScalarEdge(
input_edge->src(), i, &evaluated, &size, outer_context));
if (evaluated) {
dims.push_back(size < 0 ? target_context->UnknownDim()
: target_context->MakeDim(size));
} else {
dims.push_back(target_context->UnknownDim());
}
}
*result = target_context->MakeShape(dims);
} else if (src_op == "Concat" || src_op == "ConcatV2") {
*result = target_context->Scalar();
const int concat_dim =
src_op == "Concat" ? 0 : src_context->num_inputs() - 1;
for (int i = 0; i < src_context->num_inputs(); ++i) {
if (i == concat_dim) continue;
ShapeHandle sub_result;
TF_RETURN_IF_ERROR(ConstantPartialShape(target_context, input_edge->src(),
i, &sub_result, outer_context));
if (!target_context->RankKnown(sub_result)) {
*result = target_context->UnknownShape();
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(
target_context->Concatenate(*result, sub_result, result));
}
} else if (src_op == "StridedSlice") {
TF_RETURN_IF_ERROR(PartialStridedSliceShape(input_edge->src(), src_context,
result, outer_context));
} else if (src_op == "VariableShape") {
auto* handle_data = src_context->input_handle_shapes_and_types(0);
if (handle_data != nullptr && !handle_data->empty()) {
*result = handle_data->at(0).shape;
} else {
*result = target_context->UnknownShape();
}
} else {
Tensor t;
bool evaluated = false;
TF_RETURN_IF_ERROR(EvaluateConstantTensorForEdge(node, dst_idx, &evaluated,
&t, outer_context));
TF_RETURN_IF_ERROR(target_context->MakeShapeFromTensor(
evaluated ? &t : nullptr, src_shape, result));
}
return absl::OkStatus();
}
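// Handles the StridedSlice case of ConstantPartialShape. Only simple 1-D
// slices without ellipsis/new-axis/shrink-axis masks are supported; anything
// else yields an unknown shape.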
Status ShapeRefiner::PartialStridedSliceShape(
Node* slice_node, InferenceContext* ctx, ShapeHandle* result,
shape_inference::InferenceContext* outer_context) {
for (int i = 1; i <= 3; ++i) {
ShapeHandle input_shape = ctx->input(i);
if (ctx->Value(ctx->Dim(input_shape, 0)) != 1) {
*result = ctx->UnknownShape();
return absl::OkStatus();
}
}
int begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask;
TF_RETURN_IF_ERROR(
GetNodeAttr(slice_node->attrs(), "begin_mask", &begin_mask));
TF_RETURN_IF_ERROR(GetNodeAttr(slice_node->attrs(), "end_mask", &end_mask));
TF_RETURN_IF_ERROR(
GetNodeAttr(slice_node->attrs(), "ellipsis_mask", &ellipsis_mask));
TF_RETURN_IF_ERROR(
GetNodeAttr(slice_node->attrs(), "new_axis_mask", &new_axis_mask));
TF_RETURN_IF_ERROR(
GetNodeAttr(slice_node->attrs(), "shrink_axis_mask", &shrink_axis_mask));
if (!(begin_mask == 0 || begin_mask == 1) ||
!(end_mask == 0 || end_mask == 1) || ellipsis_mask != 0 ||
new_axis_mask != 0 || shrink_axis_mask != 0) {
*result = ctx->UnknownShape();
return absl::OkStatus();
}
bool evaluated;
int64_t begin;
if (begin_mask == 1) {
begin = 0;
} else {
TF_RETURN_IF_ERROR(EvaluateConstantIntScalarEdge(slice_node, 1, &evaluated,
&begin, outer_context));
if (!evaluated) {
*result = ctx->UnknownShape();
return absl::OkStatus();
}
}
int64_t end;
if (end_mask == 1) {
end = std::numeric_limits<int64_t>::max();
} else {
TF_RETURN_IF_ERROR(EvaluateConstantIntScalarEdge(slice_node, 2, &evaluated,
&end, outer_context));
if (!evaluated) {
*result = ctx->UnknownShape();
return absl::OkStatus();
}
}
int64_t stride;
TF_RETURN_IF_ERROR(EvaluateConstantIntScalarEdge(slice_node, 3, &evaluated,
&stride, outer_context));
if (!evaluated) {
*result = ctx->UnknownShape();
return absl::OkStatus();
}
ShapeHandle input;
TF_RETURN_IF_ERROR(
ConstantPartialShape(ctx, slice_node, 0, &input, outer_context));
TF_RETURN_IF_ERROR(ctx->Subshape(input, begin, end, stride, result));
return absl::OkStatus();
}
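// Runs the shape function for `node` (or nested function shape inference for
// function-call nodes), re-running it whenever a requested constant input
// tensor or input-tensor-as-partial-shape can be materialized.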
Status ShapeRefiner::RunShapeFn(const Node* node,
const OpRegistrationData* op_reg_data,
InferenceContext* c,
InferenceContext* outer_context) {
std::vector<const Tensor*> input_tensors(node->num_inputs(), nullptr);
std::vector<Tensor> real_tensors(node->num_inputs());
std::vector<bool> attempted_materialization(node->num_inputs());
std::vector<bool> attempted_tensor_as_shape_conversion(node->num_inputs());
std::vector<ShapeHandle> input_tensors_as_shapes;
c->set_input_tensors(input_tensors);
c->set_input_tensors_as_shapes(input_tensors_as_shapes);
auto run_inference_lambda = [&]() {
if (function_library_ && IsFunctionCall(*function_library_, *node)) {
bool disable_shape_inference;
if (!GetNodeAttr(AttrSlice(node->def()), "_disable_call_shape_inference",
&disable_shape_inference)
.ok() ||
!disable_shape_inference) {
NameAttrList function;
TF_RETURN_IF_ERROR(
NameAndAttrsFromFunctionCall(node->def(), &function));
const FunctionDef* function_def =
function_library_->Find(function.name());
if (function_def != nullptr) {
auto const_tensor_map_copy = const_tensor_map_;
const_tensor_map_.clear();
VLOG(4) << "Running shape inference for function \""
<< function.name() << "\".";
Status function_inference_status = InferShapesForFunction(
function_def, AttrSlice(&function.attr()), c);
const_tensor_map_ = const_tensor_map_copy;
VLOG(4) << "Shape inference for function \"" << function.name()
<< "\" returned status " << function_inference_status << ".";
return function_inference_status;
}
}
}
if (op_reg_data->shape_inference_fn) {
VLOG(4) << "Running shape inference function for node \"" << node->name()
<< "\" of type \"" << node->type_string() << "\".";
TF_RETURN_IF_ERROR(c->Run(op_reg_data->shape_inference_fn));
} else {
VLOG(4) << "Unknown shape inference function for node \"" << node->name()
<< "\" of type \"" << node->type_string() << "\".";
TF_RETURN_IF_ERROR(c->Run(shape_inference::UnknownShape));
}
VLOG(4) << "Shape inference passed for node \"" << node->name()
<< "\" of type \"" << node->type_string() << "\".";
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(run_inference_lambda());
bool rerun_shape_fn;
do {
rerun_shape_fn = false;
for (int i = 0; i < c->num_inputs(); ++i) {
if (!c->requested_input_tensor(i)) {
continue;
}
if (!attempted_materialization[i]) {
attempted_materialization[i] = true;
Tensor result;
bool evaluated = false;
TF_RETURN_IF_ERROR(EvaluateConstantTensorForEdge(
node, i, &evaluated, &result, outer_context));
if (evaluated) {
real_tensors[i] = result;
input_tensors[i] = &real_tensors[i];
rerun_shape_fn = true;
}
}
if (c->requested_input_tensor_as_partial_shape(i) &&
!attempted_tensor_as_shape_conversion[i]) {
attempted_tensor_as_shape_conversion[i] = true;
if (i >= input_tensors_as_shapes.size()) {
input_tensors_as_shapes.resize(i + 1);
}
ShapeHandle s;
TF_RETURN_IF_ERROR(ConstantPartialShape(c, node, i, &s, outer_context));
input_tensors_as_shapes[i] = s;
rerun_shape_fn = true;
}
}
if (rerun_shape_fn) {
c->set_input_tensors(input_tensors);
c->set_input_tensors_as_shapes(input_tensors_as_shapes);
TF_RETURN_IF_ERROR(run_inference_lambda());
}
} while (rerun_shape_fn);
return absl::OkStatus();
}
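// Returns true only if s0 and s1 are provably identical: the same handle, or
// the same known rank with every dimension pair either sharing a handle or
// agreeing on a known value.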
bool ShapeRefiner::SameDefinedShape(InferenceContext* c, ShapeHandle s0,
ShapeHandle s1) {
if (s0.SameHandle(s1)) {
return true;
}
if (c->Rank(s0) != c->Rank(s1)) {
return false;
}
if (!c->RankKnown(s0) && !c->RankKnown(s1)) {
return false;
}
for (int i = 0; i < c->Rank(s0); ++i) {
if (!c->Dim(s0, i).SameHandle(c->Dim(s1, i))) {
int64_t val0 = c->Value(c->Dim(s0, i));
int64_t val1 = c->Value(c->Dim(s1, i));
if (val0 < 0 || val1 < 0 || val0 != val1) {
return false;
}
}
}
return true;
}
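// Returns true if `updated` differs from `existing` in length, dtype, or any
// defined shape.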
bool ShapeRefiner::IsUpdatedShapesOrTypes(
InferenceContext* c, const std::vector<ShapeAndType>& existing,
const std::vector<ShapeAndType>& updated) {
if (existing.size() != updated.size()) {
return true;
}
for (int i = 0; i < existing.size(); i++) {
if (!SameDefinedShape(c, existing[i].shape, updated[i].shape) ||
existing[i].dtype != updated[i].dtype) {
return true;
}
}
return false;
}
} | #include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/function_testlib.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
class ShapeRefinerTest : public ::testing::Test {
protected:
bool SameHandle(shape_inference::DimensionHandle a,
shape_inference::DimensionHandle b) {
return a.SameHandle(b);
}
bool SameHandle(shape_inference::ShapeHandle a,
shape_inference::ShapeHandle b) {
return a.SameHandle(b);
}
bool SameDefinedShape(shape_inference::InferenceContext* c,
shape_inference::ShapeHandle s0,
shape_inference::ShapeHandle s1) {
return ShapeRefiner::SameDefinedShape(c, s0, s1);
}
bool IsUpdatedShapesOrTypes(
shape_inference::InferenceContext* c,
const std::vector<shape_inference::ShapeAndType>& existing,
const std::vector<shape_inference::ShapeAndType>& updated) {
return ShapeRefiner::IsUpdatedShapesOrTypes(c, existing, updated);
}
static constexpr int64_t kMaxTensorSize = ShapeRefiner::kMaxTensorSize;
void TestStridedSlice(const PartialTensorShape& input_shape, int begin,
int end, int stride, const char* expected,
int begin_mask = 0, int end_mask = 0,
int ellipsis_mask = 0, int shrink_axis_mask = 0,
StringPiece test_op = "TensorAsShapeInt32") {
Scope root = Scope::DisabledShapeInferenceScope();
auto placeholder =
ops::Placeholder(root, DT_INT32, ops::Placeholder::Shape(input_shape));
auto input = ops::Shape(root, placeholder);
auto begin_op = ops::Const(root, {begin});
auto end_op = ops::Const(root, {end});
auto stride_op = ops::Const(root, {stride});
auto slice = ops::StridedSlice(root, input, begin_op, end_op, stride_op,
ops::StridedSlice::BeginMask(begin_mask)
.EndMask(end_mask)
.EllipsisMask(ellipsis_mask)
.ShrinkAxisMask(shrink_axis_mask));
Node* result;
TF_ASSERT_OK(NodeBuilder("test", test_op)
.Input(slice.node())
.Finalize(root.graph(), &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(placeholder.node()));
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(begin_op.node()));
TF_ASSERT_OK(m.AddNode(end_op.node()));
TF_ASSERT_OK(m.AddNode(stride_op.node()));
TF_ASSERT_OK(m.AddNode(slice.node()));
TF_ASSERT_OK(m.AddNode(result));
shape_inference::InferenceContext* ctx = m.GetContext(result);
EXPECT_EQ(ctx->DebugString(ctx->output(0)), expected);
}
};
namespace {
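// Convenience macros for checking an op's inferred output shape and the
// shape/dtype recorded for its resource handle outputs.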
#define EXPECT_SHAPE(EXPECTED, M, OP, IDX) \
do { \
shape_inference::InferenceContext* ctx = M.GetContext(OP.node()); \
EXPECT_EQ(EXPECTED, ctx->DebugString(ctx->output(IDX))); \
} while (0);
#define EXPECT_RESOURCE_SINGLE_SHAPE(EXPECTED, M, OP, IDX) \
do { \
shape_inference::InferenceContext* ctx = M.GetContext(OP.node()); \
auto* v = ctx->output_handle_shapes_and_types(IDX); \
EXPECT_NE(v, nullptr); \
EXPECT_EQ(v->size(), 1); \
EXPECT_EQ(EXPECTED, ctx->DebugString((*v)[0].shape)); \
} while (0);
#define EXPECT_RESOURCE_SINGLE_TYPE(EXPECTED, M, OP, IDX) \
do { \
shape_inference::InferenceContext* ctx = M.GetContext(OP.node()); \
auto* v = ctx->output_handle_shapes_and_types(IDX); \
EXPECT_NE(v, nullptr); \
EXPECT_EQ(v->size(), 1); \
EXPECT_EQ(EXPECTED, (*v)[0].dtype); \
} while (0);
TEST_F(ShapeRefinerTest, Constant) {
Scope root = Scope::NewRootScope();
auto c = ops::Const(root, 42.0f);
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(c.node()));
EXPECT_SHAPE("[]", m, c, 0);
}
TEST_F(ShapeRefinerTest, MatMul) {
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
Scope root = Scope::NewRootScope();
auto a = ops::Const(root, {{1.0f}, {2.0f}});
auto b = ops::Const(root, {{1.0f, 2.0f}});
auto mm = ops::MatMul(root, a, b);
TF_ASSERT_OK(m.AddNode(a.node()));
TF_ASSERT_OK(m.AddNode(b.node()));
TF_ASSERT_OK(m.AddNode(mm.node()));
EXPECT_SHAPE("[2,1]", m, a, 0);
EXPECT_SHAPE("[1,2]", m, b, 0);
EXPECT_SHAPE("[2,2]", m, mm, 0);
}
TEST_F(ShapeRefinerTest, BadShapes) {
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
Scope root = Scope::NewRootScope();
auto a = ops::Const(root, {{1.0f}, {2.0f}});
auto b = ops::Const(root, {{1.0f}, {2.0f}});
auto mm = ops::MatMul(root, a, b);
TF_ASSERT_OK(m.AddNode(a.node()));
TF_ASSERT_OK(m.AddNode(b.node()));
Status s = m.AddNode(mm.node());
ASSERT_FALSE(s.ok());
ASSERT_TRUE(absl::StrContains(s.message(),
"Dimensions must be equal, but are 1 and 2"));
}
TEST_F(ShapeRefinerTest, SetShape) {
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
Scope root = Scope::NewRootScope();
auto a = ops::Placeholder(root, DT_FLOAT);
TF_ASSERT_OK(m.AddNode(a.node()));
auto ic = m.GetContext(a.node());
ASSERT_NE(nullptr, ic);
shape_inference::ShapeHandle h = ic->MakeShape({2, ic->UnknownDim()});
TF_ASSERT_OK(m.SetShape(a.node(), 0, h));
EXPECT_SHAPE("[2,?]", m, a, 0);
shape_inference::ShapeHandle h2 = ic->MakeShape({ic->UnknownDim(), 2});
TF_ASSERT_OK(m.SetShape(a.node(), 0, h2));
EXPECT_SHAPE("[2,2]", m, a, 0);
ASSERT_FALSE(m.SetShape(a.node(), 1, h).ok());
ASSERT_FALSE(m.SetShape(a.node(), -1, h).ok());
auto b = ops::Const(root, {{1.0f}, {2.0f}});
ASSERT_FALSE(m.SetShape(b.node(), 0, h).ok());
h = ic->MakeShape({3, ic->UnknownDim()});
ASSERT_FALSE(m.SetShape(a.node(), 0, h).ok());
}
namespace {
REGISTER_OP("TestOpWithNoShapeFn").Input("a: int32").Output("o: int32");
}
TEST_F(ShapeRefinerTest, MissingShapeInferenceFns) {
Scope root = Scope::NewRootScope();
auto a = ops::Const(root, 42);
Node* b;
TF_ASSERT_OK(NodeBuilder("b", "TestOpWithNoShapeFn")
.Input(a.node())
.Finalize(root.graph(), &b));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(a.node()));
EXPECT_FALSE(m.AddNode(b).ok());
m.set_require_shape_inference_fns(false);
TF_EXPECT_OK(m.AddNode(b));
}
TEST_F(ShapeRefinerTest, PropagateConstants) {
{
Scope root = Scope::NewRootScope();
auto input = ops::Const(root, {{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}});
auto dim = ops::Variable(root, {}, DT_INT32);
auto am = ops::ArgMax(root, input, dim);
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(dim.node()));
TF_ASSERT_OK(m.AddNode(am.node()));
EXPECT_SHAPE("[?]", m, am, 0);
}
{
Scope root = Scope::NewRootScope();
auto input = ops::Const(root, {{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}});
auto dim = ops::Const(root, 1);
auto am = ops::ArgMax(root, input, dim);
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(dim.node()));
TF_ASSERT_OK(m.AddNode(am.node()));
EXPECT_SHAPE("[3]", m, am, 0);
}
{
Scope root = Scope::NewRootScope();
auto input = ops::Const(root, {{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}});
auto dim = ops::Const(root, 0);
auto am = ops::ArgMax(root, input, dim);
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(dim.node()));
TF_ASSERT_OK(m.AddNode(am.node()));
EXPECT_SHAPE("[2]", m, am, 0);
}
}
TEST_F(ShapeRefinerTest, ExtractConstantSubgraphMultiOutput) {
{
Scope root = Scope::NewRootScope();
auto small = ops::Const(root, {static_cast<int32>(1), TensorShape({1, 1})});
auto large = ops::Const(
root, {static_cast<int32>(2), TensorShape({4, kMaxTensorSize / 2})});
Node* multi;
TF_ASSERT_OK(NodeBuilder("MI", "MultiIdentity")
.Input(std::vector<NodeBuilder::NodeOut>{small.node(),
large.node()})
.Attr("N", 2)
.Finalize(root.graph(), &multi));
Node* shape_v;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeVectorForAllElements")
.Input(multi, 0)
.Finalize(root.graph(), &shape_v));
auto add = ops::Add(root, Output(multi, 0), Output(multi, 1));
Node* shape_v2;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeVectorForAllElements")
.Input(add.node())
.Finalize(root.graph(), &shape_v2));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(small.node()));
TF_ASSERT_OK(m.AddNode(large.node()));
TF_ASSERT_OK(m.AddNode(multi));
TF_ASSERT_OK(m.AddNode(shape_v));
TF_ASSERT_OK(m.AddNode(add.node()));
TF_ASSERT_OK(m.AddNode(shape_v2));
shape_inference::InferenceContext* ctx = m.GetContext(shape_v2);
EXPECT_EQ(strings::StrCat("[", kMaxTensorSize * 2 * 3, "]"),
ctx->DebugString(ctx->output(0)));
}
}
namespace {
REGISTER_OP("TestOp")
.Input("a: float")
.Input("b: float")
.Output("o: float")
.SetShapeFn([](shape_inference::InferenceContext* c) {
if (c->input_tensor(0)) {
if (c->input_tensor(1)) {
c->set_output(0, c->Matrix(10, 10));
return absl::OkStatus();
}
return shape_inference::ScalarShape(c);
}
return shape_inference::UnknownShape(c);
});
}
TEST_F(ShapeRefinerTest, InputTensorDependencies) {
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
Graph graph(OpRegistry::Global());
Node* node;
Tensor a(DT_FLOAT, TensorShape({}));
a.scalar<float>()() = 1.0;
Tensor b(DT_FLOAT, TensorShape({}));
b.scalar<float>()() = 2.0;
Node* input_a = test::graph::Constant(&graph, a);
Node* input_b = test::graph::Constant(&graph, b);
TF_ASSERT_OK(NodeBuilder("Test", "TestOp")
.Input(input_a)
.Input(input_b)
.Finalize(&graph, &node));
TF_ASSERT_OK(m.AddNode(input_a));
TF_ASSERT_OK(m.AddNode(input_b));
TF_ASSERT_OK(m.AddNode(node));
shape_inference::InferenceContext* ctx = m.GetContext(node);
EXPECT_EQ("[10,10]", ctx->DebugString(ctx->output(0)));
}
namespace {
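// Test-only ops: ShapeData/ShapeDataInt64 turn a constant input tensor into
// the output shape, ShapeVectorForAllElements emits a vector whose length is
// the sum of the input's elements, and MultiIdentity forwards its N inputs.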
REGISTER_OP("ShapeData")
.Input("a: int32")
.Output("o: int32")
.SetShapeFn([](shape_inference::InferenceContext* c) {
const Tensor* shape_data = c->input_tensor(0);
if (shape_data == nullptr) {
return shape_inference::UnknownShape(c);
}
std::vector<shape_inference::DimensionHandle> dims;
dims.reserve(shape_data->NumElements());
for (int i = 0; i < shape_data->NumElements(); ++i) {
dims.emplace_back(c->MakeDim(shape_data->flat<int32>()(i)));
}
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
});
REGISTER_OP("ShapeDataInt64")
.Input("a: int64")
.Output("o: int64")
.SetShapeFn([](shape_inference::InferenceContext* c) {
const Tensor* shape_data = c->input_tensor(0);
if (shape_data == nullptr) {
return shape_inference::UnknownShape(c);
}
std::vector<shape_inference::DimensionHandle> dims;
dims.reserve(shape_data->NumElements());
for (int i = 0; i < shape_data->NumElements(); ++i) {
dims.emplace_back(c->MakeDim(shape_data->flat<int64_t>()(i)));
}
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
});
REGISTER_OP("ShapeVectorForAllElements")
.Input("a: int32")
.Output("o: int32")
.SetShapeFn([](shape_inference::InferenceContext* c) {
const Tensor* shape_data = c->input_tensor(0);
if (shape_data == nullptr) {
return shape_inference::UnknownShape(c);
}
int64_t total = 0;
for (int i = 0; i < shape_data->NumElements(); ++i) {
total += shape_data->flat<int32>()(i);
}
c->set_output(0, c->Vector(total));
return absl::OkStatus();
});
REGISTER_OP("MultiIdentity")
.Input("a: N * int32")
.Output("o: N * int32")
.Attr("N: int >= 1")
.SetShapeFn([](shape_inference::InferenceContext* c) {
for (int i = 0; i < c->num_inputs(); ++i) {
c->set_output(i, c->input(i));
}
return absl::OkStatus();
});
class MultiIdentity : public OpKernel {
public:
explicit MultiIdentity(OpKernelConstruction* c) : OpKernel(c) {}
void Compute(OpKernelContext* c) override {
for (int i = 0; i < c->num_inputs(); ++i) {
c->set_output(i, c->input(i));
}
}
};
REGISTER_KERNEL_BUILDER(Name("MultiIdentity").Device(DEVICE_CPU),
MultiIdentity);
}
TEST_F(ShapeRefinerTest, PropagateShapeAcrossTensorContent) {
Scope root = Scope::NewRootScope();
auto input = ops::Variable(root, {2, 4}, DT_INT32);
auto shape = ops::Shape(root, input);
auto ones = ops::Const(root, {1});
auto sliced = ops::Slice(root, shape, ones, ones);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(sliced.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(ones.node()));
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(shape.node()));
TF_ASSERT_OK(m.AddNode(sliced.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[4]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, PropagateShapeAcrossTensorContentInt64) {
Scope root = Scope::NewRootScope();
auto input = ops::Variable(
root, {2, 4, static_cast<int64_t>(std::numeric_limits<int32>::max()) * 2},
DT_INT64);
auto attrs = ops::Shape::OutType(DT_INT64);
auto shape = ops::Shape(root, input, attrs);
auto ones = ops::Const(root, {1});
auto sliced = ops::Slice(root, shape, ones, ones);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeDataInt64")
.Input(sliced.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(ones.node()));
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(shape.node()));
TF_ASSERT_OK(m.AddNode(sliced.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[4]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, PropagateShapeAcrossTensorContentInt32Overflow) {
Scope root = Scope::NewRootScope();
auto input = ops::Variable(
root, {2, 4, static_cast<int64_t>(std::numeric_limits<int32>::max()) * 2},
DT_INT32);
auto shape = ops::Shape(root, input);
auto ones = ops::Const(root, {1});
auto sliced = ops::Slice(root, shape, ones, ones);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(sliced.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(ones.node()));
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(shape.node()));
TF_ASSERT_OK(m.AddNode(sliced.node()));
EXPECT_FALSE(m.AddNode(shape_data).ok());
}
TEST_F(ShapeRefinerTest, PropagateRankAcrossTensorContent) {
Scope root = Scope::NewRootScope();
auto input = ops::Variable(root, {2, 4, 3}, DT_INT32);
auto rank = ops::Rank(root, input);
auto identity = ops::Identity(root, rank);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(identity.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(rank.node()));
TF_ASSERT_OK(m.AddNode(identity.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[3]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, PropagateSizeAcrossTensorContent) {
Scope root = Scope::NewRootScope();
auto input = ops::Variable(root, {1, 2, 3, 4, 5}, DT_INT32);
auto size = ops::Size(root, input);
auto identity = ops::Identity(root, size);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(identity.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(size.node()));
TF_ASSERT_OK(m.AddNode(identity.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[120]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, PropagateSizeAcrossTensorContentInt64) {
Scope root = Scope::NewRootScope();
auto input = ops::Variable(
root,
{1, 2, 3, 4, 5,
static_cast<int64_t>(std::numeric_limits<int32>::max()) * 2},
DT_INT64);
auto attrs = ops::Size::OutType(DT_INT64);
auto size = ops::Size(root, input, attrs);
auto identity = ops::Identity(root, size);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeDataInt64")
.Input(identity.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(size.node()));
TF_ASSERT_OK(m.AddNode(identity.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[515396075280]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, PropagateSizeAcrossTensorContentInt32Overflow) {
Scope root = Scope::NewRootScope();
auto input = ops::Variable(
root,
{1, 2, 3, 4, 5,
static_cast<int64_t>(std::numeric_limits<int32>::max()) * 2},
DT_INT32);
auto size = ops::Size(root, input);
auto identity = ops::Identity(root, size);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(identity.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(size.node()));
TF_ASSERT_OK(m.AddNode(identity.node()));
EXPECT_FALSE(m.AddNode(shape_data).ok());
}
TEST_F(ShapeRefinerTest, PropagateShape) {
Scope root = Scope::NewRootScope();
auto input = ops::Const(root, {{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}});
auto shape = ops::Shape(root, input);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(shape.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(shape.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[3,2]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, PropagateSize) {
Scope root = Scope::NewRootScope();
auto input = ops::Const(root, {{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}});
auto size = ops::Size(root, input);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(size.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(size.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[6]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, PropagateRank) {
Scope root = Scope::NewRootScope();
auto input = ops::Const(root, {{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}});
auto rank = ops::Rank(root, input);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(rank.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(rank.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[2]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, PropagateRange) {
Scope root = Scope::NewRootScope();
auto begin = ops::Const(root, 1);
auto limit = ops::Const(root, 11);
auto delta = ops::Const(root, 3);
auto range = ops::Range(root, begin, limit, delta);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(range.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(begin.node()));
TF_ASSERT_OK(m.AddNode(limit.node()));
TF_ASSERT_OK(m.AddNode(delta.node()));
TF_ASSERT_OK(m.AddNode(range.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[1,4,7,10]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, NoPropagatePlaceholderWithDefault) {
Scope root = Scope::NewRootScope();
auto constant = ops::Const<int>(root, 2);
auto placeholder =
ops::PlaceholderWithDefault(root, constant, PartialTensorShape());
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(placeholder.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(constant.node()));
TF_ASSERT_OK(m.AddNode(placeholder.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ic = m.GetContext(shape_data);
EXPECT_EQ(ic->DebugString(ic->output(0)), "?");
}
TEST_F(ShapeRefinerTest, ConstantValueTwoInputsToSameNode) {
Scope root = Scope::NewRootScope();
auto begin_and_delta = ops::Const(root, 1);
auto limit = ops::Const(root, 4);
auto range = ops::Range(root, begin_and_delta, limit, begin_and_delta);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(range.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(begin_and_delta.node()));
TF_ASSERT_OK(m.AddNode(limit.node()));
TF_ASSERT_OK(m.AddNode(range.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[1,2,3]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, ConstantValueVisitNodeTwice) {
Scope root = Scope::NewRootScope();
auto begin = ops::Const(root, 1);
auto limit = ops::Const(root, 8);
auto delta = ops::Const(root, 3);
auto d1 = ops::Add(root, begin, limit);
auto d2 = ops::Add(root, begin, delta);
auto flimit = ops::Sub(root, begin, d1);
auto fdelta = ops::Sub(root, begin, d2);
auto nl = ops::Abs(root, flimit);
auto nd = ops::Abs(root, fdelta);
auto range = ops::Range(root, begin, nl, nd);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(range.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(begin.node()));
TF_ASSERT_OK(m.AddNode(limit.node()));
TF_ASSERT_OK(m.AddNode(delta.node()));
TF_ASSERT_OK(m.AddNode(d1.node()));
TF_ASSERT_OK(m.AddNode(d2.node()));
TF_ASSERT_OK(m.AddNode(flimit.node()));
TF_ASSERT_OK(m.AddNode(fdelta.node()));
TF_ASSERT_OK(m.AddNode(nl.node()));
TF_ASSERT_OK(m.AddNode(nd.node()));
TF_ASSERT_OK(m.AddNode(range.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[1,4,7]", ctx->DebugString(ctx->output(0)));
}
namespace {
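// Shape functions and test-only ops used by the ConstantValueAsShape_* tests:
// they reinterpret an input tensor (or its first element) as the output shape.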
Status TensorAsShapeShapeFn(shape_inference::InferenceContext* c) {
shape_inference::ShapeHandle out;
  TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(/*input_idx=*/0, &out));
c->set_output(0, out);
return absl::OkStatus();
}
Status PartialTensorAsShapeShapeFn(shape_inference::InferenceContext* c) {
shape_inference::ShapeHandle out;
const Tensor* t = c->input_tensor(0);
if (t == nullptr || t->NumElements() != 1) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(
c->MakeShapeFromTensorShape(TensorShape({t->flat<int32>()(0)}), &out));
c->set_output(0, out);
return absl::OkStatus();
}
REGISTER_OP("PartialTensorAsShapeInt32")
.Input("a: int32")
.Output("o: int32")
.SetShapeFn(PartialTensorAsShapeShapeFn);
REGISTER_OP("TensorAsShapeInt32")
.Input("a: int32")
.Output("o: int32")
.SetShapeFn(TensorAsShapeShapeFn);
REGISTER_OP("TensorAsShapeInt64")
.Input("a: int64")
.Output("o: int64")
.SetShapeFn(TensorAsShapeShapeFn);
REGISTER_OP("NonConstScalarInt32")
.Output("o: int32")
.SetDoNotOptimize()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("NonConstScalarInt64")
.Output("o: int64")
.SetDoNotOptimize()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("WithEmptyVectorShape")
.Output("o: int32")
.SetDoNotOptimize()
.SetShapeFn([](shape_inference::InferenceContext* c) {
c->set_output(0, c->Vector(0));
return absl::OkStatus();
});
REGISTER_OP("WithPartialShape")
.Output("o: int32")
.SetDoNotOptimize()
.SetShapeFn([](shape_inference::InferenceContext* c) {
c->set_output(
0, c->MakeShape({1, shape_inference::InferenceContext::kUnknownDim, 3,
shape_inference::InferenceContext::kUnknownDim, 5}));
return absl::OkStatus();
});
REGISTER_OP("WithPartialShape2")
.Output("o: int32")
.SetDoNotOptimize()
.SetShapeFn([](shape_inference::InferenceContext* c) {
c->set_output(
0,
c->MakeShape({6, shape_inference::InferenceContext::kUnknownDim, 8}));
return absl::OkStatus();
});
REGISTER_OP("WithUnknownShape")
.Output("o: int32")
.SetDoNotOptimize()
.SetShapeFn([](shape_inference::InferenceContext* c) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
});
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_EmptyVector) {
Scope root = Scope::NewRootScope();
Node* input;
TF_ASSERT_OK(
NodeBuilder("in", "WithEmptyVectorShape").Finalize(root.graph(), &input));
Node* result;
TF_ASSERT_OK(NodeBuilder("test", "TensorAsShapeInt32")
.Input(input)
.Finalize(root.graph(), &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input));
TF_ASSERT_OK(m.AddNode(result));
shape_inference::InferenceContext* ctx = m.GetContext(result);
EXPECT_EQ("[]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_Shape) {
for (int pass = 0; pass < 2; ++pass) {
Scope root = Scope::NewRootScope();
Node* input;
TF_ASSERT_OK(
NodeBuilder("in", pass == 0 ? "WithPartialShape" : "WithUnknownShape")
.Finalize(root.graph(), &input));
auto shape = ops::Shape(root, Output(input));
Node* result;
TF_ASSERT_OK(NodeBuilder("test", "TensorAsShapeInt32")
.Input(shape.node())
.Finalize(root.graph(), &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input));
TF_ASSERT_OK(m.AddNode(shape.node()));
TF_ASSERT_OK(m.AddNode(result));
shape_inference::InferenceContext* ctx = m.GetContext(result);
if (pass == 0) {
EXPECT_EQ("[1,?,3,?,5]", ctx->DebugString(ctx->output(0)));
} else {
EXPECT_EQ("?", ctx->DebugString(ctx->output(0)));
}
}
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_PackInt32) {
Scope root = Scope::DisabledShapeInferenceScope();
Node* scalar_non_const;
TF_ASSERT_OK(NodeBuilder("in", "NonConstScalarInt32")
.Finalize(root.graph(), &scalar_non_const));
InputList inputs{
Input(ops::Const<int32>(root, 10)),
Input(ops::Const<int32>(root, 20)),
Input(Output(scalar_non_const)),
Input(ops::Const<int32>(root, 40)),
};
auto pack = ops::Stack(root, inputs);
TF_ASSERT_OK(root.status());
Node* result;
TF_ASSERT_OK(NodeBuilder("test", "TensorAsShapeInt32")
.Input(pack.node())
.Finalize(root.graph(), &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
for (const auto& input : inputs) {
TF_ASSERT_OK(m.AddNode(input.node()));
}
TF_ASSERT_OK(m.AddNode(pack.node()));
TF_ASSERT_OK(m.AddNode(result));
shape_inference::InferenceContext* ctx = m.GetContext(result);
EXPECT_EQ("[10,20,?,40]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_PackInt64) {
Scope root = Scope::DisabledShapeInferenceScope();
Node* scalar_non_const;
TF_ASSERT_OK(NodeBuilder("in", "NonConstScalarInt64")
.Finalize(root.graph(), &scalar_non_const));
InputList inputs{
Input(ops::Const<int64_t>(root, int64_t{10})),
Input(ops::Const<int64_t>(root, int64_t{20})),
Input(Output(scalar_non_const)),
Input(ops::Const<int64_t>(root, int64_t{1} << 40)),
};
auto pack = ops::Stack(root, inputs);
TF_ASSERT_OK(root.status());
Node* result;
TF_ASSERT_OK(NodeBuilder("test", "TensorAsShapeInt64")
.Input(pack.node())
.Finalize(root.graph(), &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
for (const auto& input : inputs) {
TF_ASSERT_OK(m.AddNode(input.node()));
}
TF_ASSERT_OK(m.AddNode(pack.node()));
TF_ASSERT_OK(m.AddNode(result));
shape_inference::InferenceContext* ctx = m.GetContext(result);
EXPECT_EQ("[10,20,?,1099511627776]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_PackUnknownDim) {
Scope root = Scope::NewRootScope();
InputList inputs{
Input(ops::Const<int64_t>(root, int64_t{10})),
Input(ops::Const<int64_t>(root, int64_t{-1})),
};
auto pack = ops::Stack(root, inputs);
TF_ASSERT_OK(root.status());
Node* result;
TF_ASSERT_OK(NodeBuilder("test", "TensorAsShapeInt64")
.Input(pack.node())
.Finalize(root.graph(), &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
for (const auto& input : inputs) {
TF_ASSERT_OK(m.AddNode(input.node()));
}
TF_ASSERT_OK(m.AddNode(pack.node()));
TF_ASSERT_OK(m.AddNode(result));
shape_inference::InferenceContext* ctx = m.GetContext(result);
EXPECT_EQ("[10,?]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_PackInvalidInput) {
Scope root = Scope::NewRootScope();
InputList inputs{
Input(ops::Const<int64_t>(root, {int64_t{10}, int64_t{20}})),
Input(ops::Const<int64_t>(root, {int64_t{10}, int64_t{21}})),
};
auto pack = ops::Stack(root, inputs);
TF_ASSERT_OK(root.status());
Node* result;
TF_ASSERT_OK(NodeBuilder("test", "TensorAsShapeInt64")
.Input(pack.node())
.Finalize(root.graph(), &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
for (const auto& input : inputs) {
TF_ASSERT_OK(m.AddNode(input.node()));
}
TF_ASSERT_OK(m.AddNode(pack.node()));
EXPECT_TRUE(absl::StrContains(m.AddNode(result).message(), "but is rank 2"));
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_Concat) {
Scope root = Scope::DisabledShapeInferenceScope();
Graph* g = root.graph();
Node* partial_1;
Node* partial_2;
TF_ASSERT_OK(NodeBuilder("in", "WithPartialShape").Finalize(g, &partial_1));
TF_ASSERT_OK(NodeBuilder("in", "WithPartialShape2").Finalize(g, &partial_2));
auto const_input = ops::Const(root, {9, 10, 11});
OutputList concat_inputs{
ops::Shape(root, Output(partial_1)),
ops::Shape(root, Output(partial_2)),
const_input,
};
auto concat_dim = ops::Const(root, 0);
auto concat = ops::Concat(root, concat_inputs, concat_dim);
TF_ASSERT_OK(root.status());
Node* result;
TF_ASSERT_OK(NodeBuilder("test", "TensorAsShapeInt32")
.Input(concat.node())
.Finalize(g, &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(partial_1));
TF_ASSERT_OK(m.AddNode(partial_2));
for (const auto& o : concat_inputs) {
TF_ASSERT_OK(m.AddNode(o.node()));
}
TF_ASSERT_OK(m.AddNode(concat_dim.node()));
TF_ASSERT_OK(m.AddNode(concat.node()));
TF_ASSERT_OK(m.AddNode(result));
shape_inference::InferenceContext* ctx = m.GetContext(result);
EXPECT_EQ("[1,?,3,?,5,6,?,8,9,10,11]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_ConcatWithUnknown) {
Scope root = Scope::DisabledShapeInferenceScope();
Graph* g = root.graph();
Node* scalar_non_const;
TF_ASSERT_OK(NodeBuilder("in", "NonConstScalarInt32")
.Finalize(root.graph(), &scalar_non_const));
Node* partial_1;
Node* partial_2;
Node* unknown;
TF_ASSERT_OK(NodeBuilder("in", "WithPartialShape").Finalize(g, &partial_1));
TF_ASSERT_OK(NodeBuilder("in", "WithPartialShape2").Finalize(g, &partial_2));
TF_ASSERT_OK(NodeBuilder("in", "WithUnknownShape").Finalize(g, &unknown));
OutputList concat_inputs{
ops::Shape(root, Output(partial_1)),
ops::Shape(root, Output(partial_2)),
ops::Shape(root, Output(unknown)),
};
auto concat_dim = ops::Const(root, 0);
auto concat = ops::Concat(root, concat_inputs, concat_dim);
TF_ASSERT_OK(root.status());
Node* result;
TF_ASSERT_OK(NodeBuilder("test", "TensorAsShapeInt32")
.Input(concat.node())
.Finalize(g, &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(partial_1));
TF_ASSERT_OK(m.AddNode(partial_2));
TF_ASSERT_OK(m.AddNode(unknown));
for (const auto& o : concat_inputs) {
TF_ASSERT_OK(m.AddNode(o.node()));
}
TF_ASSERT_OK(m.AddNode(concat_dim.node()));
TF_ASSERT_OK(m.AddNode(concat.node()));
TF_ASSERT_OK(m.AddNode(result));
shape_inference::InferenceContext* ctx = m.GetContext(result);
EXPECT_EQ("?", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_ConcatInvalidDimValue) {
Scope root = Scope::DisabledShapeInferenceScope();
Graph* g = root.graph();
Node* scalar_non_const;
TF_ASSERT_OK(NodeBuilder("in", "NonConstScalarInt32")
.Finalize(root.graph(), &scalar_non_const));
Node* partial_1;
Node* partial_2;
TF_ASSERT_OK(NodeBuilder("in", "WithPartialShape").Finalize(g, &partial_1));
TF_ASSERT_OK(NodeBuilder("in", "WithPartialShape2").Finalize(g, &partial_2));
auto const_input = ops::Const(root, {9, -2, 11});
OutputList concat_inputs{
ops::Shape(root, Output(partial_1)),
ops::Shape(root, Output(partial_2)),
const_input,
};
auto concat_dim = ops::Const(root, 0);
auto concat = ops::Concat(root, concat_inputs, concat_dim);
TF_ASSERT_OK(root.status());
Node* result;
TF_ASSERT_OK(NodeBuilder("test", "TensorAsShapeInt32")
.Input(concat.node())
.Finalize(g, &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(partial_1));
TF_ASSERT_OK(m.AddNode(partial_2));
for (const auto& o : concat_inputs) {
TF_ASSERT_OK(m.AddNode(o.node()));
}
TF_ASSERT_OK(m.AddNode(concat_dim.node()));
TF_ASSERT_OK(m.AddNode(concat.node()));
EXPECT_EQ("Invalid value in tensor used for shape: -2",
m.AddNode(result).message());
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_StridedSlice) {
  TestStridedSlice(
      /*input_shape=*/{1, -1, 3, -1, 5},
      /*begin=*/2,
      /*end=*/5,
      /*stride=*/1,
      /*expected=*/"[3,?,5]");
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_StridedSliceNegativeStride) {
  TestStridedSlice(
      /*input_shape=*/{1, -1, 3, -1, 5},
      /*begin=*/10,
      /*end=*/0,
      /*stride=*/-1,
      /*expected=*/"[5,?,3,?]");
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_StridedSliceMasks) {
  TestStridedSlice(
      /*input_shape=*/{1, -1, 3, -1, 5},
      /*begin=*/3,
      /*end=*/4,
      /*stride=*/1,
      /*expected=*/"[1,?,3,?,5]",
      /*begin_mask=*/1,
      /*end_mask=*/1);
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_StridedSliceInvalidMask) {
  TestStridedSlice(
      /*input_shape=*/{1, -1, 3},
      /*begin=*/2,
      /*end=*/3,
      /*stride=*/1,
      /*expected=*/"[?,?,?]",
      /*begin_mask=*/0,
      /*end_mask=*/0,
      /*ellipsis_mask=*/1);
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_StridedSliceWithShrinkAxis) {
  TestStridedSlice(
      /*input_shape=*/{1, -1, 3, -1, 5},
      /*begin=*/2,
      /*end=*/3,
      /*stride=*/1,
      /*expected=*/"[3]",
      /*begin_mask=*/0,
      /*end_mask=*/0,
      /*ellipsis_mask=*/0,
      /*shrink_axis_mask=*/1,
      /*test_op=*/"PartialTensorAsShapeInt32");
}
TEST_F(ShapeRefinerTest,
ConstantValueAsShape_StridedSliceWithShrinkAxisOnUnknownDim) {
  TestStridedSlice(
      /*input_shape=*/{1, -1, 3, -1, 5},
      /*begin=*/1,
      /*end=*/2,
      /*stride=*/1,
      /*expected=*/"?",
      /*begin_mask=*/0,
      /*end_mask=*/0,
      /*ellipsis_mask=*/0,
      /*shrink_axis_mask=*/1,
      /*test_op=*/"PartialTensorAsShapeInt32");
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_StridedSliceMulti) {
Scope root = Scope::DisabledShapeInferenceScope();
auto input = ops::Placeholder(root, DT_INT32);
auto begin = ops::Const(root, {0, 0});
auto end = ops::Const(root, {2, 2});
auto stride = ops::Const(root, {1, 1});
auto slice = ops::StridedSlice(root, input, begin, end, stride);
Node* result;
TF_ASSERT_OK(NodeBuilder("test", "TensorAsShapeInt32")
.Input(slice.node())
.Finalize(root.graph(), &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(begin.node()));
TF_ASSERT_OK(m.AddNode(end.node()));
TF_ASSERT_OK(m.AddNode(stride.node()));
TF_ASSERT_OK(m.AddNode(slice.node()));
TF_ASSERT_OK(m.AddNode(result));
shape_inference::InferenceContext* ctx = m.GetContext(result);
EXPECT_EQ(ctx->DebugString(ctx->output(0)), "?");
}
namespace {
REGISTER_OP("Dummy");
}
TEST_F(ShapeRefinerTest, SameDefinedShape) {
Scope root = Scope::NewRootScope();
Graph* g = root.graph();
Node* test;
TF_CHECK_OK(NodeBuilder("test", "Dummy").Finalize(g, &test));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
m.set_require_shape_inference_fns(false);
TF_ASSERT_OK(m.AddNode(test));
shape_inference::InferenceContext* ctx = m.GetContext(test);
auto unknown = ctx->UnknownShape();
auto unknown_b = ctx->UnknownShape();
auto s_1_2 = ctx->MakeShape({1, 2});
auto s_1_2_b = ctx->MakeShape({1, 2});
auto s_2_2 = ctx->MakeShape({2, 2});
auto s_unknown_2 = ctx->MakeShape({-1, 2});
auto s_unknown_2_b = ctx->MakeShape({-1, 2});
EXPECT_TRUE(SameDefinedShape(ctx, unknown, unknown));
EXPECT_FALSE(SameDefinedShape(ctx, unknown, unknown_b));
EXPECT_FALSE(SameDefinedShape(ctx, unknown, s_1_2));
EXPECT_TRUE(SameDefinedShape(ctx, s_1_2, s_1_2_b));
EXPECT_FALSE(SameDefinedShape(ctx, s_1_2, s_2_2));
EXPECT_TRUE(SameDefinedShape(ctx, s_unknown_2, s_unknown_2));
EXPECT_FALSE(SameDefinedShape(ctx, s_unknown_2, s_unknown_2_b));
}
TEST_F(ShapeRefinerTest, IsUpdatedShapesOrTypes) {
Scope root = Scope::NewRootScope();
Graph* g = root.graph();
Node* test;
TF_CHECK_OK(NodeBuilder("test", "Dummy").Finalize(g, &test));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
m.set_require_shape_inference_fns(false);
TF_ASSERT_OK(m.AddNode(test));
shape_inference::InferenceContext* ctx = m.GetContext(test);
shape_inference::ShapeHandle unknown = ctx->UnknownShape();
std::vector<shape_inference::ShapeAndType> t0{
{ctx->MakeShape({1, 2, 3}), DT_FLOAT},
{unknown, DT_INVALID},
{ctx->MakeShape({4, 3, 2, 1}), DT_INT32}};
std::vector<shape_inference::ShapeAndType> t1{
{ctx->MakeShape({1, 2, 3}), DT_FLOAT},
{unknown, DT_INVALID},
{ctx->MakeShape({4, 3, 2, 1}), DT_INT32}};
std::vector<shape_inference::ShapeAndType> t2{
{ctx->MakeShape({1, 2, 4}), DT_FLOAT},
{ctx->UnknownShape(), DT_INVALID},
{ctx->MakeShape({4, 3, 2, 1}), DT_INT32}};
std::vector<shape_inference::ShapeAndType> t3{
{ctx->MakeShape({1, 2, 3}), DT_INT32},
{ctx->UnknownShape(), DT_INVALID},
{ctx->MakeShape({4, 3, 2, 1}), DT_INT32}};
EXPECT_FALSE(IsUpdatedShapesOrTypes(ctx, t0, t1));
EXPECT_TRUE(IsUpdatedShapesOrTypes(ctx, t0, t2));
EXPECT_TRUE(IsUpdatedShapesOrTypes(ctx, t0, t3));
}
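// Changing the queue's output handle shape must propagate to the dequeue via
// UpdateNode, merging by default and relaxing when requested.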
TEST_F(ShapeRefinerTest, IncrementalUpdates) {
Scope root = Scope::NewRootScope();
Graph* g = root.graph();
Node* queue;
TF_CHECK_OK(NodeBuilder("queue", "FIFOQueueV2")
.Attr("component_types", {DT_FLOAT})
.Finalize(g, &queue));
Node* dequeue;
TF_CHECK_OK(NodeBuilder("dequeue", "QueueDequeueV2")
.Attr("component_types", {DT_FLOAT})
.Input(queue)
.Finalize(g, &dequeue));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(queue));
TF_ASSERT_OK(m.AddNode(dequeue));
shape_inference::InferenceContext* ctx = m.GetContext(dequeue);
EXPECT_EQ("?", ctx->DebugString(ctx->output(0)));
ctx = m.GetContext(queue);
shape_inference::ShapeHandle shp = ctx->MakeShape({3, 7});
ctx->set_output_handle_shapes_and_types(
0, std::vector<shape_inference::ShapeAndType>{{shp, DT_FLOAT}});
bool refined = false;
  TF_ASSERT_OK(m.UpdateNode(dequeue, /*relax=*/false, &refined));
EXPECT_TRUE(refined);
ctx = m.GetContext(dequeue);
EXPECT_EQ("[3,7]", ctx->DebugString(ctx->output(0)));
ctx = m.GetContext(queue);
shp = ctx->MakeShape({2, 7});
ctx->set_output_handle_shapes_and_types(
0, std::vector<shape_inference::ShapeAndType>{{shp, DT_FLOAT}});
refined = false;
  TF_ASSERT_OK(m.UpdateNode(dequeue, /*relax=*/true, &refined));
EXPECT_TRUE(refined);
ctx = m.GetContext(dequeue);
EXPECT_EQ("[?,7]", ctx->DebugString(ctx->output(0)));
ctx = m.GetContext(queue);
shp = ctx->MakeShape({shape_inference::InferenceContext::kUnknownDim, 7});
ctx->set_output_handle_shapes_and_types(
0, std::vector<shape_inference::ShapeAndType>{{shp, DT_FLOAT}});
refined = false;
  TF_ASSERT_OK(m.UpdateNode(dequeue, /*relax=*/true, &refined));
EXPECT_TRUE(refined);
ctx = m.GetContext(dequeue);
EXPECT_EQ("[?,7]", ctx->DebugString(ctx->output(0)));
EXPECT_TRUE(SameHandle(ctx->Dim(ctx->output(0), 0), ctx->Dim(shp, 0)));
ctx = m.GetContext(queue);
shape_inference::ShapeHandle shp2 = shp;
ctx->set_output_handle_shapes_and_types(
0, std::vector<shape_inference::ShapeAndType>{{shp2, DT_FLOAT}});
refined = false;
  TF_ASSERT_OK(m.UpdateNode(dequeue, /*relax=*/false, &refined));
EXPECT_FALSE(refined);
EXPECT_TRUE(SameHandle(ctx->Dim(shp, 0), ctx->Dim(shp2, 0)));
}
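// Runs XTimesTwo through the refiner with and without a function library for
// shape inference; only with one can the call's output shape be inferred.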
void TestSimpleFunctionInference(bool enable_function_inference) {
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = test::function::XTimesTwo();
FunctionLibraryDefinition f_lib(OpRegistry::Global(), f_lib_proto);
Scope root = Scope::NewRootScope();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto x = ops::Const(root, {{1.0f, 2.0f}});
auto x2 = test::function::Call(&root, "x2", "XTimesTwo", {x});
ShapeRefiner m(TF_GRAPH_DEF_VERSION, &f_lib);
if (enable_function_inference) {
m.set_function_library_for_shape_inference(&f_lib);
}
TF_ASSERT_OK(m.AddNode(x.node()));
TF_ASSERT_OK(m.AddNode(x2.node()));
EXPECT_SHAPE("[1,2]", m, x, 0);
if (enable_function_inference) {
EXPECT_SHAPE("[1,2]", m, x2, 0);
} else {
EXPECT_SHAPE("?", m, x2, 0);
}
}
TEST_F(ShapeRefinerTest, SimpleFunctionShapeInference_Disabled) {
  TestSimpleFunctionInference(/*enable_function_inference=*/false);
}
TEST_F(ShapeRefinerTest, SimpleFunctionShapeInference) {
  TestSimpleFunctionInference(/*enable_function_inference=*/true);
}
TEST_F(ShapeRefinerTest, FunctionShapeInferenceFallback) {
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = test::function::XTimesTwo();
FunctionLibraryDefinition f_lib(OpRegistry::Global(), f_lib_proto);
Scope root = Scope::NewRootScope();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto x = ops::Const(root, {{.0f, .0f}});
auto x2 = test::function::Call(&root, "x2", "XTimesTwo", {x});
FunctionDefLibrary empty_f_lib_proto;
FunctionLibraryDefinition empty_f_lib(OpRegistry::Global(),
empty_f_lib_proto);
ShapeRefiner m(TF_GRAPH_DEF_VERSION, &f_lib);
m.set_function_library_for_shape_inference(&empty_f_lib);
TF_ASSERT_OK(m.AddNode(x.node()));
TF_ASSERT_OK(m.AddNode(x2.node()));
EXPECT_SHAPE("[1,2]", m, x, 0);
EXPECT_SHAPE("?", m, x2, 0);
}
TEST_F(ShapeRefinerTest, ChainedFunctionShapeInferenceWithMultipleInputs) {
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = test::function::XTimesTwo();
*(f_lib_proto.add_function()) = test::function::XTimesFour();
*(f_lib_proto.add_function()) = test::function::XTimes16();
*(f_lib_proto.add_function()) = test::function::WXPlusB();
FunctionLibraryDefinition f_lib(OpRegistry::Global(), f_lib_proto);
Scope root = Scope::NewRootScope();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto w = ops::Const(root, {{.0f}, {.0f}, {.0f}});
auto x = ops::Const(root, {{.0f, .0f, .0f}});
auto b = ops::Const(root, {{.0f}});
auto wxplusb = test::function::Call(&root, "wxplusb", "WXPlusB", {w, x, b});
auto wxplusb16 =
test::function::Call(&root, "wxplusb16", "XTimes16", {wxplusb});
ShapeRefiner m(TF_GRAPH_DEF_VERSION, &f_lib);
m.set_function_library_for_shape_inference(&f_lib);
TF_ASSERT_OK(m.AddNode(w.node()));
TF_ASSERT_OK(m.AddNode(x.node()));
TF_ASSERT_OK(m.AddNode(b.node()));
TF_ASSERT_OK(m.AddNode(wxplusb.node()));
TF_ASSERT_OK(m.AddNode(wxplusb16.node()));
EXPECT_SHAPE("[3,1]", m, w, 0);
EXPECT_SHAPE("[1,3]", m, x, 0);
EXPECT_SHAPE("[1,1]", m, b, 0);
EXPECT_SHAPE("[3,3]", m, wxplusb, 0);
EXPECT_SHAPE("[3,3]", m, wxplusb16, 0);
}
TEST_F(ShapeRefinerTest, FunctionShapeInferenceWorksForResourceHandles) {
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = test::function::Swap();
FunctionLibraryDefinition f_lib(OpRegistry::Global(), f_lib_proto);
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto x1 = ops::VarHandleOp(root, DataType::DT_FLOAT, TensorShape({128, 256}));
auto x2 = ops::VarHandleOp(root, DataType::DT_DOUBLE, TensorShape({1024}));
auto swap = test::function::Call(&root, "swap", "Swap", {x1, x2});
EXPECT_EQ(swap.node()->num_outputs(), 2);
ShapeRefiner m(TF_GRAPH_DEF_VERSION, &f_lib);
m.set_function_library_for_shape_inference(&f_lib);
TF_ASSERT_OK(m.AddNode(x1.node()));
TF_ASSERT_OK(m.AddNode(x2.node()));
TF_ASSERT_OK(m.AddNode(swap.node()));
EXPECT_EQ(m.GetContext(swap.node())->num_outputs(), 2);
EXPECT_RESOURCE_SINGLE_SHAPE("[128,256]", m, x1, 0);
EXPECT_RESOURCE_SINGLE_SHAPE("[1024]", m, x2, 0);
EXPECT_RESOURCE_SINGLE_SHAPE("[1024]", m, swap, 0);
EXPECT_RESOURCE_SINGLE_SHAPE("[128,256]", m, swap, 1);
EXPECT_RESOURCE_SINGLE_TYPE(DataType::DT_DOUBLE, m, swap, 0);
EXPECT_RESOURCE_SINGLE_TYPE(DataType::DT_FLOAT, m, swap, 1);
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/shape_refiner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/shape_refiner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3a9c3669-9b4c-4efe-bafe-3ce365d94cce | cpp | google/arolla | qtype_constraint | arolla/expr/operator_loader/qtype_constraint.cc | arolla/expr/operator_loader/qtype_constraint_test.cc | #include "arolla/expr/operator_loader/qtype_constraint.h"
#include <cstddef>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/eval/thread_safe_model_executor.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/operator_loader/helper.h"
#include "arolla/expr/operator_loader/parameter_qtypes.h"
#include "arolla/expr/qtype_utils.h"
#include "arolla/expr/tuple_expr_operator.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/unit.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::operator_loader {
namespace {
using ::arolla::expr::BindOp;
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::GetLeafKeys;
using ::arolla::expr::Literal;
using ::arolla::expr::MakeTupleOperator;
using ::arolla::expr::PopulateQTypes;
using ::arolla::expr::ToDebugString;
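// Rewrites a constraint predicate so it can be evaluated on parameter qtypes:
// placeholders become leaves, and a companion "presence" expression is built
// that is present only when none of the referenced qtypes is NOTHING. Returns
// the (predicate, presence) pair, or an error if the predicate does not
// evaluate to OPTIONAL_UNIT.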
absl::StatusOr<std::pair<ExprNodePtr, ExprNodePtr>>
PreprocessQTypeConstraint(ExprNodePtr expr) {
ExprNodePtr nothing_literal = Literal(GetNothingQType());
ASSIGN_OR_RETURN(auto predicate_expr, ReplacePlaceholdersWithLeaves(expr));
ExprNodePtr presence_expr = nullptr;
absl::flat_hash_map<std::string, QTypePtr> leaf_qtypes;
for (const auto& leaf_key : GetLeafKeys(predicate_expr)) {
leaf_qtypes[leaf_key] = GetQTypeQType();
ASSIGN_OR_RETURN(auto arg_is_present,
expr::CallOp("core.not_equal",
{nothing_literal, expr::Leaf(leaf_key)}));
if (presence_expr == nullptr) {
presence_expr = std::move(arg_is_present);
} else {
ASSIGN_OR_RETURN(
presence_expr,
expr::CallOp("core.presence_and",
{std::move(presence_expr), std::move(arg_is_present)}));
}
}
if (presence_expr == nullptr) {
presence_expr = Literal(OptionalUnit(kUnit));
}
auto deduce_output_qtype =
[&leaf_qtypes](const ExprNodePtr& e) -> const QType* {
if (auto annotated_expr = PopulateQTypes(e, leaf_qtypes);
annotated_expr.ok()) {
return (*annotated_expr)->qtype();
} else {
return nullptr;
}
};
DCHECK_EQ(deduce_output_qtype(presence_expr), GetQType<OptionalUnit>());
const QType* output_qtype = deduce_output_qtype(predicate_expr);
if (output_qtype == nullptr) {
return absl::InvalidArgumentError(
"error while computing output QType of a QType constraint predicate: " +
ToDebugString(expr));
}
if (output_qtype != GetQType<OptionalUnit>()) {
return absl::InvalidArgumentError(absl::StrFormat(
"expected a constraint predicate to return %s, got %s: %s",
GetQType<OptionalUnit>()->name(), output_qtype->name(),
ToDebugString(expr)));
}
return std::pair(std::move(predicate_expr), std::move(presence_expr));
}
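// Substitutes "{param}" placeholders in an error message with the actual
// parameter qtype names; "{*param}" expands a tuple parameter into a
// parenthesized list of its field qtypes.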
std::string FormatQTypeNames(absl::string_view message,
const ParameterQTypes& parameter_qtypes) {
absl::flat_hash_map<std::string, std::string> replacements;
replacements.reserve(parameter_qtypes.size());
for (const auto& [param_name, param_qtype] : parameter_qtypes) {
replacements[absl::StrFormat("{%s}", param_name)] =
std::string(param_qtype->name());
if (IsTupleQType(param_qtype)) {
replacements[absl::StrFormat("{*%s}", param_name)] =
"(" +
absl::StrJoin(param_qtype->type_fields(), ", ",
[](std::string* out, const auto& field_slot) {
const absl::string_view name =
field_slot.GetType()->name();
out->append(name.data(), name.size());
}) +
")";
}
}
return absl::StrReplaceAll(message, replacements);
}
}  // namespace
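// Compiles the constraints into a single callable. The returned functor
// evaluates every predicate against the given parameter qtypes, returns an
// InvalidArgumentError (with qtype names substituted into the error message)
// for the first violated constraint whose arguments are all known, and
// otherwise reports whether all arguments were present.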
absl::StatusOr<QTypeConstraintFn> MakeQTypeConstraintFn(
absl::Span<const QTypeConstraint> constraints) {
if (constraints.empty()) {
return [](const ParameterQTypes&) -> absl::StatusOr<bool> { return true; };
}
std::vector<std::string> error_messages;
std::vector<ExprNodePtr> exprs;
exprs.reserve(constraints.size() * 2);
error_messages.reserve(constraints.size());
for (const auto& constraint : constraints) {
ASSIGN_OR_RETURN(
(auto [predicate_expr, presence_expr]),
PreprocessQTypeConstraint(constraint.predicate_expr));
exprs.emplace_back(std::move(predicate_expr));
exprs.emplace_back(std::move(presence_expr));
error_messages.emplace_back(constraint.error_message);
}
ASSIGN_OR_RETURN(auto expr, BindOp(MakeTupleOperator::Make(), exprs, {}));
ASSIGN_OR_RETURN(auto executor, MakeParameterQTypeModelExecutor(expr));
return [executor = std::move(executor),
error_messages = std::move(error_messages)](
const ParameterQTypes& parameter_qtypes) -> absl::StatusOr<bool> {
ASSIGN_OR_RETURN(auto values, executor(parameter_qtypes));
DCHECK(IsTupleQType(values.GetType()));
DCHECK(values.GetFieldCount() == error_messages.size() * 2);
bool all_args_present = true;
for (size_t i = 0; i < error_messages.size(); ++i) {
ASSIGN_OR_RETURN(OptionalUnit fulfilled,
values.GetField(i * 2 + 0).As<OptionalUnit>());
ASSIGN_OR_RETURN(OptionalUnit args_present,
values.GetField(i * 2 + 1).As<OptionalUnit>());
all_args_present = all_args_present && args_present;
if (args_present && !fulfilled) {
return absl::InvalidArgumentError(
FormatQTypeNames(error_messages[i], parameter_qtypes));
}
}
return all_args_present;
};
}
} | #include "arolla/expr/operator_loader/qtype_constraint.h"
#include <cstdint>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/expr/expr.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/shape_qtype.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/util/bytes.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::operator_loader {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::expr::CallOp;
using ::arolla::expr::Literal;
using ::arolla::expr::Placeholder;
using ::testing::HasSubstr;
class QTypeConstraintTest : public ::testing::Test {
protected:
static absl::StatusOr<QTypeConstraintFn> SampleConstraintFn() {
ASSIGN_OR_RETURN(auto x_is_scalar_qtype_expr,
CallOp("qtype.is_scalar_qtype", {Placeholder("x")}));
ASSIGN_OR_RETURN(auto y_is_scalar_qtype_expr,
CallOp("qtype.is_scalar_qtype", {Placeholder("y")}));
ASSIGN_OR_RETURN(
auto x_y_has_common_qtype_expr,
CallOp("core.not_equal", {CallOp("qtype.common_qtype",
{Placeholder("x"), Placeholder("y")}),
Literal(GetNothingQType())}));
return MakeQTypeConstraintFn({
{x_is_scalar_qtype_expr, "expected `x` to be scalar, got {x}"},
{y_is_scalar_qtype_expr, "expected `y` to be scalar, got {y}"},
{x_y_has_common_qtype_expr, "no common qtype for x:{x} and y:{y}"},
});
}
static absl::StatusOr<QTypeConstraintFn> SampleConstraintWithVariadicFn() {
auto false_expr = Literal(OptionalUnit{});
return MakeQTypeConstraintFn({
{false_expr, "*x: {*x}"},
});
}
};
TEST_F(QTypeConstraintTest, Trivial) {
ASSERT_OK_AND_ASSIGN(auto fn, MakeQTypeConstraintFn({}));
EXPECT_THAT(fn({}), IsOkAndHolds(true));
}
TEST_F(QTypeConstraintTest, Ok) {
ASSERT_OK_AND_ASSIGN(auto fn, SampleConstraintFn());
EXPECT_THAT(fn({
{"x", GetQType<int64_t>()},
{"y", GetQType<int32_t>()},
}),
IsOkAndHolds(true));
EXPECT_THAT(fn({
{"x", GetQType<int64_t>()},
{"y", GetNothingQType()},
}),
IsOkAndHolds(false));
EXPECT_THAT(fn({
{"x", GetNothingQType()},
{"y", GetQType<int32_t>()},
}),
IsOkAndHolds(false));
}
TEST_F(QTypeConstraintTest, ErrorMessage) {
ASSERT_OK_AND_ASSIGN(auto fn, SampleConstraintFn());
EXPECT_THAT(
fn({
{"x", GetQType<int64_t>()},
{"y", GetQType<ScalarShape>()},
}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected `y` to be scalar, got SCALAR_SHAPE")));
EXPECT_THAT(
fn({
{"x", GetNothingQType()},
{"y", GetQType<ScalarShape>()},
}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected `y` to be scalar, got SCALAR_SHAPE")));
EXPECT_THAT(fn({
{"x", GetQType<int32_t>()},
{"y", GetQType<Bytes>()},
}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("no common qtype for x:INT32 and y:BYTES")));
}
TEST_F(QTypeConstraintTest, NoOutputQType) {
ASSERT_OK_AND_ASSIGN(
auto expr, CallOp("core.get_nth", {Placeholder("x"), Placeholder("y")}));
EXPECT_THAT(
MakeQTypeConstraintFn({{expr, ""}}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("error while computing output QType of a QType "
"constraint predicate: "
"M.core.get_nth(P.x, P.y)")));
}
TEST_F(QTypeConstraintTest, BadOutputQType) {
auto x = Placeholder("x");
EXPECT_THAT(MakeQTypeConstraintFn({{x, ""}}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected a constraint predicate to return "
"OPTIONAL_UNIT, got QTYPE: P.x")));
}
TEST_F(QTypeConstraintTest, VariadicConstraint) {
ASSERT_OK_AND_ASSIGN(auto fn, SampleConstraintWithVariadicFn());
EXPECT_THAT(
fn({{"x", MakeTupleQType({})}}),
StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("*x: ()")));
EXPECT_THAT(fn({
{"x", MakeTupleQType({GetQType<int32_t>(), GetQType<float>(),
GetQType<bool>()})},
}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("*x: (INT32, FLOAT32, BOOLEAN)")));
EXPECT_THAT(
fn({
{"x", GetQType<int64_t>()},
}),
StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("*x: {*x}")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operator_loader/qtype_constraint.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operator_loader/qtype_constraint_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
871d31db-5559-4d8e-9619-1a704c808ec6 | cpp | google/tsl | status | tsl/platform/status.cc | tsl/platform/status_test.cc | #include "tsl/platform/status.h"
#include <stdio.h>
#include <deque>
#include <functional>
#include <memory>
#include <ostream>
#include <sstream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/escaping.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/stack_frame.h"
#include "tsl/platform/stacktrace.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/strcat.h"
#include "tsl/platform/stringprintf.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tsl {
namespace {
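// Log sink that buffers the most recent warning and error messages so they
// can be attached to aggregated status summaries. The buffer size defaults to
// 5 and can be overridden with the TF_WORKER_NUM_FORWARDED_LOG_MESSAGES
// environment variable.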
class StatusLogSink : public TFLogSink {
public:
static StatusLogSink* GetInstance() {
static StatusLogSink* sink = new StatusLogSink();
return sink;
}
void enable() {
absl::call_once(flag_, [this] {
num_messages_ = 5;
if (const char* num_msgs_str =
getenv("TF_WORKER_NUM_FORWARDED_LOG_MESSAGES")) {
if (!absl::SimpleAtoi(num_msgs_str, &num_messages_)) {
LOG(WARNING) << "Failed to parse env variable "
"TF_WORKER_NUM_WARNING_ERROR_LOG_IN_STATUS="
<< num_msgs_str << " as int. Using the default value "
<< num_messages_ << ".";
}
}
if (num_messages_ > 0) {
TFAddLogSink(this);
}
});
}
void GetMessages(std::vector<std::string>* logs) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
for (auto& msg : messages_) {
logs->push_back(msg);
}
}
void Send(const TFLogEntry& entry) override TF_LOCKS_EXCLUDED(mu_) {
if (entry.log_severity() < absl::LogSeverity::kWarning) return;
mutex_lock lock(mu_);
messages_.emplace_back(entry.ToString());
if (messages_.size() > static_cast<size_t>(num_messages_)) {
messages_.pop_front();
}
}
private:
mutex mu_;
absl::once_flag flag_;
int num_messages_ = 0;
std::deque<std::string> messages_ TF_GUARDED_BY(mu_);
};
}  // namespace
namespace errors {
static constexpr const char kStackTraceProtoUrl[] =
"type.googleapis.com/tensorflow.StackTracePayload";
void SetStackTrace(absl::Status& status, std::vector<StackFrame> stack_trace) {
std::vector<std::string> items;
items.reserve(stack_trace.size());
for (StackFrame& frame : stack_trace) {
items.push_back(
absl::StrCat(absl::StrReplaceAll(frame.file_name, {{"\n", ""}}), "\n",
frame.line_number, "\n",
absl::StrReplaceAll(frame.function_name, {{"\n", ""}})));
}
status.SetPayload(kStackTraceProtoUrl,
absl::Cord(absl::StrJoin(items, "\n")));
}
std::vector<StackFrame> GetStackTrace(const absl::Status& status) {
std::vector<StackFrame> stack_trace;
absl::optional<absl::Cord> maybe_serialized_payload =
status.GetPayload(kStackTraceProtoUrl);
if (maybe_serialized_payload.has_value()) {
std::vector<std::string> split =
absl::StrSplit(maybe_serialized_payload.value().Flatten(), '\n');
assert(split.size() % 3 == 0);
for (int i = 0; i < split.size() / 3; ++i) {
const int idx = 3 * i;
int line_number = -1;
CHECK(absl::SimpleAtoi(split[idx + 1], &line_number));
stack_trace.emplace_back(std::move(split[idx]), line_number,
std::move(split[idx + 2]));
}
}
return stack_trace;
}
}  // namespace errors
#ifdef _WIN32
const char* NullTerminatedMessage(const absl::Status& status) {
return absl::StatusMessageAsCStr(status);
}
#endif
std::string* TfCheckOpHelperOutOfLine(const absl::Status& v, const char* msg) {
std::stringstream ss;
ss << "Non-OK-status: " << msg << "\nStatus: " << v;
return new std::string(ss.str());
}
StatusGroup::StatusGroup() {}
StatusGroup::StatusGroup(std::initializer_list<absl::Status> statuses) {
for (const absl::Status& s : statuses) {
Update(s);
}
}
static constexpr const char kDerivedStatusProtoUrl[] =
"type.googleapis.com/tensorflow.DerivedStatus";
absl::Status StatusGroup::MakeDerived(const absl::Status& s) {
if (IsDerived(s)) {
return s;
} else {
absl::Status derived(s);
derived.SetPayload(kDerivedStatusProtoUrl, absl::Cord(""));
return derived;
}
}
bool StatusGroup::IsDerived(const absl::Status& s) {
return s.GetPayload(kDerivedStatusProtoUrl).has_value();
}
void StatusGroup::ConfigureLogHistory() {
StatusLogSink::GetInstance()->enable();
}
void StatusGroup::Update(const absl::Status& s) {
if (s.ok()) {
++num_ok_;
} else {
ok_ = false;
if (IsDerived(s)) {
derived_.insert(s);
} else {
non_derived_.insert(s);
}
}
}
static constexpr int kMaxAggregatedStatusMessageSize = 8 * 1024;
static constexpr int kMaxAttachedLogMessageSize = 512;
std::unordered_map<std::string, absl::Cord> StatusGroup::GetPayloads() const {
std::unordered_map<std::string, absl::Cord> payloads;
auto capture_payload = [&payloads](absl::string_view key,
const absl::Cord& value) {
payloads[std::string(key)] = value;
};
for (const auto& status : derived_) {
status.ForEachPayload(capture_payload);
}
for (const auto& status : non_derived_) {
status.ForEachPayload(capture_payload);
}
payloads.erase(kDerivedStatusProtoUrl);
return payloads;
}
absl::Status MakeStatus(
absl::StatusCode code, absl::string_view message,
const std::unordered_map<std::string, absl::Cord>& payloads) {
absl::Status status(code, message);
for (const auto& payload : payloads) {
status.SetPayload(payload.first, payload.second);
}
return status;
}
std::string MakeString(const absl::Status& status) {
return absl::StrCat(absl::StatusCodeToString(status.code()), ": ",
status.message());
}
absl::Status StatusGroup::as_summary_status() const {
if (ok_) {
return absl::OkStatus();
}
auto get_recent_logs = [this]() -> std::string {
if (!recent_logs_.empty()) {
std::vector<std::string> fmt;
fmt.push_back("\nRecent warning and error logs:");
for (auto& log : recent_logs_) {
fmt.push_back(" " + log.substr(0, kMaxAttachedLogMessageSize));
}
return absl::StrJoin(fmt, "\n");
} else {
return "";
}
};
if (non_derived_.size() == 1) {
return MakeStatus(
non_derived_.begin()->code(),
strings::StrCat(non_derived_.begin()->message(), get_recent_logs()),
GetPayloads());
}
if (!non_derived_.empty()) {
std::vector<std::string> fmt;
fmt.push_back(
strings::Printf("%zu root error(s) found.", non_derived_.size()));
int index = 0;
auto code = absl::StatusCode::kCancelled;
for (const auto& s : non_derived_) {
if (code == absl::StatusCode::kCancelled &&
s.code() != absl::StatusCode::kCancelled) {
code = s.code();
}
fmt.emplace_back(strings::StrCat(" (", index, ") ", MakeString(s)));
++index;
}
fmt.push_back(strings::Printf("%zu successful operations.", num_ok_));
fmt.push_back(
strings::Printf("%zu derived errors ignored.", derived_.size()));
std::string error_msg =
absl::StrJoin(fmt, "\n").substr(0, kMaxAggregatedStatusMessageSize);
return MakeStatus(code, strings::StrCat(error_msg, get_recent_logs()),
GetPayloads());
} else {
return MakeDerived(MakeStatus(derived_.begin()->code(),
derived_.begin()->message(), GetPayloads()));
}
}
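// Like as_summary_status(), but concatenates the full messages of all
// non-derived errors instead of summarizing them.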
absl::Status StatusGroup::as_concatenated_status() const {
if (ok_) {
return absl::OkStatus();
}
if (non_derived_.size() == 1) {
return MakeStatus(non_derived_.begin()->code(),
non_derived_.begin()->message(), GetPayloads());
}
if (!non_derived_.empty()) {
std::vector<string> fmt;
fmt.emplace_back("\n=====================");
for (const auto& s : non_derived_) {
fmt.emplace_back(MakeString(s));
}
fmt.emplace_back("=====================\n");
return MakeStatus(
non_derived_.begin()->code(),
absl::StrJoin(fmt, "\n").substr(0, kMaxAggregatedStatusMessageSize),
GetPayloads());
} else {
return MakeDerived(MakeStatus(derived_.begin()->code(),
derived_.begin()->message(), GetPayloads()));
}
}
void StatusGroup::AttachLogMessages() {
recent_logs_.clear();
StatusLogSink::GetInstance()->GetMessages(&recent_logs_);
}
} | #include "tsl/platform/status.h"
#include <unordered_map>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "xla/tsl/protobuf/status.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/stack_frame.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tsl {
namespace {
using ::testing::IsEmpty;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
TEST(ToStringTest, PayloadsArePrinted) {
absl::Status status = errors::Aborted("Aborted Error Message");
status.SetPayload("payload_key", absl::Cord(absl::StrFormat(
"payload_value %c%c%c", 1, 2, 3)));
EXPECT_EQ(status.ToString(),
"ABORTED: Aborted Error Message [payload_key='payload_value "
"\\x01\\x02\\x03']");
}
TEST(ToStringTest, MatchesAbslStatus) {
absl::Status status = errors::Aborted("Aborted Error Message");
status.SetPayload("payload_key", absl::Cord(absl::StrFormat(
"payload_value %c%c%c", 1, 2, 3)));
absl::Status absl_status =
absl::Status(absl::StatusCode::kAborted, status.message());
absl_status.SetPayload("payload_key", absl::Cord(absl::StrFormat(
"payload_value %c%c%c", 1, 2, 3)));
EXPECT_EQ(status.ToString(), absl_status.ToString());
}
TEST(StackTrace, SerializeAndDeserializeCorrectly) {
absl::Status status = errors::Aborted("Aborted Error Message");
std::vector<StackFrame> stack_trace;
stack_trace.push_back(StackFrame("filename_1", 33, "func_name_1"));
stack_trace.push_back(StackFrame("filename_2", 66, "func_name_2"));
errors::SetStackTrace(status, stack_trace);
std::vector<StackFrame> deserialized = errors::GetStackTrace(status);
EXPECT_EQ(stack_trace.size(), deserialized.size());
for (size_t i = 0; i < stack_trace.size(); ++i) {
EXPECT_EQ(stack_trace[i], deserialized[i]);
}
}
TEST(StatusGroupTest, DeterministicOrderWithoutPayloads) {
absl::Status status_a = errors::Aborted("Status A");
absl::Status status_b = errors::Aborted("Status B");
absl::Status status_c = errors::Aborted("Status C");
absl::Status combined =
StatusGroup({status_a, status_b, status_c}).as_summary_status();
EXPECT_EQ(combined,
StatusGroup({status_a, status_b, status_c}).as_summary_status());
EXPECT_EQ(combined,
StatusGroup({status_a, status_c, status_b}).as_summary_status());
EXPECT_EQ(combined,
StatusGroup({status_b, status_a, status_c}).as_summary_status());
EXPECT_EQ(combined,
StatusGroup({status_b, status_c, status_a}).as_summary_status());
EXPECT_EQ(combined,
StatusGroup({status_c, status_a, status_b}).as_summary_status());
EXPECT_EQ(combined,
StatusGroup({status_c, status_b, status_a}).as_summary_status());
}
TEST(StatusGroupTest, DeterministicOrderWithPayloads) {
absl::Status status_a = errors::Aborted("Status A");
status_a.SetPayload("payload_key", absl::Cord("payload_value_a"));
absl::Status status_b = errors::Aborted("Status B");
status_b.SetPayload("payload_key", absl::Cord("payload_value_b"));
absl::Status status_c = errors::Aborted("Status C");
status_c.SetPayload("payload_key", absl::Cord("payload_value_c"));
absl::Status combined =
StatusGroup({status_a, status_b, status_c}).as_summary_status();
ASSERT_TRUE(combined.GetPayload("payload_key").has_value());
std::string payload(combined.GetPayload("payload_key").value());
EXPECT_EQ(payload, StatusGroup({status_a, status_b, status_c})
.as_summary_status()
.GetPayload("payload_key"));
EXPECT_EQ(payload, StatusGroup({status_a, status_c, status_b})
.as_summary_status()
.GetPayload("payload_key"));
EXPECT_EQ(payload, StatusGroup({status_b, status_a, status_c})
.as_summary_status()
.GetPayload("payload_key"));
EXPECT_EQ(payload, StatusGroup({status_b, status_c, status_a})
.as_summary_status()
.GetPayload("payload_key"));
EXPECT_EQ(payload, StatusGroup({status_c, status_a, status_b})
.as_summary_status()
.GetPayload("payload_key"));
EXPECT_EQ(payload, StatusGroup({status_c, status_b, status_a})
.as_summary_status()
.GetPayload("payload_key"));
}
TEST(StatusGroupTest, PayloadsMergedProperly) {
absl::Status status_a = errors::Aborted("Status A");
status_a.SetPayload("payload_key_a",
absl::Cord(std::string("payload_value_a")));
absl::Status status_b = errors::Aborted("Status B");
status_b.SetPayload("payload_key_b",
absl::Cord(std::string("payload_value_b")));
absl::Status status_c = errors::Aborted("Status C");
status_c.SetPayload("payload_key_c",
absl::Cord(std::string("payload_value_c")));
absl::Status derived_status_c =
StatusGroup::MakeDerived(errors::Aborted("Status C"));
derived_status_c.SetPayload(
"payload_key_c", absl::Cord(std::string("derived_payload_value_c")));
StatusGroup status_group({status_a, status_b, status_c, derived_status_c});
EXPECT_THAT(status_group.GetPayloads(), ::testing::SizeIs(3));
absl::Status combined = status_group.as_summary_status();
EXPECT_EQ(combined.GetPayload("payload_key_a"), "payload_value_a");
EXPECT_EQ(combined.GetPayload("payload_key_b"), "payload_value_b");
EXPECT_EQ(combined.GetPayload("payload_key_c"), "payload_value_c");
}
TEST(Status, ErrorStatusForEachPayloadIteratesOverAll) {
absl::Status s(absl::StatusCode::kInternal, "Error message");
s.SetPayload("key1", absl::Cord("value1"));
s.SetPayload("key2", absl::Cord("value2"));
s.SetPayload("key3", absl::Cord("value3"));
std::unordered_map<std::string, absl::Cord> payloads;
s.ForEachPayload([&payloads](absl::string_view key, const absl::Cord& value) {
payloads[std::string(key)] = value;
});
EXPECT_EQ(payloads.size(), 3);
EXPECT_EQ(payloads["key1"], "value1");
EXPECT_EQ(payloads["key2"], "value2");
EXPECT_EQ(payloads["key3"], "value3");
}
TEST(Status, OkStatusForEachPayloadNoIteration) {
absl::Status s = absl::OkStatus();
s.SetPayload("key1", absl::Cord("value1"));
s.SetPayload("key2", absl::Cord("value2"));
s.SetPayload("key3", absl::Cord("value3"));
std::unordered_map<std::string, absl::Cord> payloads;
s.ForEachPayload([&payloads](absl::string_view key, const absl::Cord& value) {
payloads[std::string(key)] = value;
});
EXPECT_EQ(payloads.size(), 0);
}
TEST(Status, SaveOKStatusToProto) {
tensorflow::StatusProto status_proto = StatusToProto(absl::OkStatus());
EXPECT_EQ(status_proto.code(), error::OK);
EXPECT_THAT(status_proto.message(), IsEmpty());
}
TEST(Status, SaveErrorStatusToProto) {
tensorflow::StatusProto status_proto =
StatusToProto(errors::NotFound("Not found"));
EXPECT_EQ(status_proto.code(), error::NOT_FOUND);
EXPECT_EQ(status_proto.message(), "Not found");
}
TEST(Status, SaveEmptyStatusToProto) {
tensorflow::StatusProto status_proto = StatusToProto(absl::Status());
EXPECT_EQ(status_proto.code(), error::OK);
EXPECT_THAT(status_proto.message(), IsEmpty());
}
TEST(Status, MakeOKStatusFromProto) {
tensorflow::StatusProto status_proto;
status_proto.set_code(error::OK);
EXPECT_THAT(StatusFromProto(status_proto), IsOk());
}
TEST(Status, MakeErrorStatusFromProto) {
tensorflow::StatusProto status_proto;
status_proto.set_code(error::INVALID_ARGUMENT);
status_proto.set_message("Invalid argument");
EXPECT_THAT(StatusFromProto(status_proto),
StatusIs(error::INVALID_ARGUMENT, "Invalid argument"));
}
TEST(Status, MakeStatusFromEmptyProto) {
EXPECT_THAT(StatusFromProto(tensorflow::StatusProto()), IsOk());
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/status.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/status_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
97a52a24-2e0a-4ad9-a56f-7f2e75f9a551 | cpp | tensorflow/tensorflow | parse_example | tensorflow/lite/kernels/parse_example/parse_example.cc | tensorflow/lite/kernels/parse_example/parse_example_test.cc | #include "tensorflow/lite/kernels/parse_example/parse_example.h"
#include <algorithm>
#include <cstddef>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/example_proto_fast_parsing.h"
#include "tensorflow/core/util/presized_cuckoo_map.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/parse_example/example_proto_fast_parsing.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace parse_example {
namespace {
namespace tf = ::tensorflow;
using tf::Status;
using tf::StringPiece;
using tf::tstring;
using tf::example::CopyOrMoveBlock;
using tf::example::FastParseExampleConfig;
using tf::example::GetListFromBuffer;
using tf::example::LimitedArraySlice;
using tf::example::ParseExample;
using tf::example::SeededHasher;
using tf::example::SmallVector;
using tf::example::SparseBuffer;
using tf::example::Type;
using tf::example::parsed::Example;
using ConfigIndex = tf::PresizedCuckooMap<std::pair<int32_t, Type>>;
struct TfLiteResult {
std::vector<TfLiteTensor*> dense_values;
std::vector<TfLiteTensor*> sparse_values;
std::vector<TfLiteTensor*> sparse_indices;
std::vector<TfLiteTensor*> sparse_shapes;
std::map<int, tf::Tensor> dense_tensors;
};
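// Fills a variable-length dense output with the feature's default value and
// then copies each example's parsed values at a fixed per-example stride.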
template <typename T>
void FillAndCopyVarLen(const int d, const size_t num_elements,
const size_t num_elements_per_minibatch,
const FastParseExampleConfig& config,
std::vector<SparseBuffer>& varlen_dense_buffers,
TfLiteTensor* values) {
const tf::Tensor& default_value = config.dense[d].default_value;
std::fill(reinterpret_cast<T*>(values->data.raw),
reinterpret_cast<T*>(values->data.raw) + num_elements,
default_value.flat<T>()(0));
auto data = reinterpret_cast<T*>(values->data.raw);
const SparseBuffer& buffer = varlen_dense_buffers[d];
const auto& end_indices = buffer.example_end_indices;
const size_t examples_in_buffer = end_indices.size();
const auto& list = GetListFromBuffer<T>(buffer);
auto list_ptr = list.begin();
size_t elements_tally = 0;
for (size_t j = 0; j < examples_in_buffer; ++j) {
const size_t num_elems = end_indices[j] - elements_tally;
CopyOrMoveBlock(list_ptr, list_ptr + num_elems, data);
list_ptr += num_elems;
data += num_elements_per_minibatch;
elements_tally = end_indices[j];
}
DCHECK(elements_tally == list.size());
}
bool ParseExample(StringRef serialized, Example* example) {
DCHECK(example != nullptr);
tf::protobuf::io::CodedInputStream stream(
reinterpret_cast<const uint8_t*>(serialized.str), serialized.len);
tensorflow::example::EnableAliasing(&stream);
return ParseExample(&stream, example);
}
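// Parses a single serialized Example. Feature names are screened by length
// against the quick filter and then looked up in the hashed config index;
// matches are written to the dense outputs, variable-length dense buffers, or
// sparse buffers, and missing dense features fall back to their defaults.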
Status FastParseSerializedExample(
StringRef serialized_example, const tstring& example_name,
const size_t example_index, const FastParseExampleConfig& config,
bool* quick_filter, int quick_filter_size,
const std::unique_ptr<ConfigIndex>& config_index, int config_index_size,
SeededHasher* hasher, std::vector<TfLiteTensor*>* output_dense,
std::vector<SparseBuffer>* output_varlen_dense,
std::vector<SparseBuffer>* output_sparse,
std::map<absl::string_view, int>& stats, TfLiteResult* result) {
DCHECK(output_dense != nullptr);
tensorflow::example::parsed::Example parsed_example;
if (!ParseExample(serialized_example, &parsed_example)) {
return tf::errors::Internal("Failed to parse example");
}
std::vector<int64_t> dense_feature_last_example(config.dense.size(), -1);
std::vector<int64_t> sparse_feature_last_example(config.sparse.size(), -1);
const size_t parsed_example_size = parsed_example.size();
for (size_t i = 0; i < parsed_example_size; ++i) {
tensorflow::example::parsed::FeatureMapEntry& name_and_feature =
parsed_example[parsed_example_size - i - 1];
const StringPiece feature_name = name_and_feature.first;
tensorflow::example::parsed::Feature& feature = name_and_feature.second;
if (feature_name.length() >= quick_filter_size ||
!quick_filter[feature_name.length()]) {
continue;
}
const uint64_t h = (*hasher)(feature_name);
std::pair<int32_t, Type> d_and_type;
if (!config_index->Find(h, &d_and_type)) {
continue;
}
size_t d = d_and_type.first;
bool is_dense = d_and_type.second == Type::Dense;
auto example_error = [&](StringPiece suffix) {
return tf::errors::Internal("Name: ", example_name,
", Key: ", feature_name,
", Index: ", example_index, ". ", suffix);
};
auto parse_error = [&] {
return example_error("Can't parse serialized Example.");
};
tf::DataType example_dtype;
if (feature.ParseDataType(&example_dtype) != absl::OkStatus()) {
return parse_error();
}
if (is_dense) {
if (example_dtype == tf::DT_INVALID) continue;
dense_feature_last_example[d] = example_index;
if (example_dtype != config.dense[d].dtype) {
return example_error(absl::StrCat(
"Data types don't match. Data type: ",
DataTypeString(example_dtype),
" but expected type: ", DataTypeString(config.dense[d].dtype)));
}
if (!config.dense[d].variable_length) {
TfLiteTensor* out = (*output_dense)[d];
const std::size_t num_elements = config.dense[d].elements_per_stride;
const std::size_t offset = example_index * num_elements;
auto shape_error = [&](size_t size, StringPiece type_str) {
return example_error(absl::StrCat(
"Number of ", type_str,
" values != expected. "
"Values size:",
size,
" but output shape: ", config.dense[d].shape.DebugString()));
};
switch (config.dense[d].dtype) {
case tf::DT_INT64: {
auto out_p = reinterpret_cast<int64_t*>(out->data.raw) + offset;
LimitedArraySlice<int64_t> slice(out_p, num_elements);
if (!feature.ParseInt64List(&slice)) return parse_error();
if (slice.EndDistance() != 0) {
return shape_error(num_elements - slice.EndDistance(), "int64");
}
break;
}
case tf::DT_FLOAT: {
auto out_p = reinterpret_cast<float*>(out->data.raw) + offset;
LimitedArraySlice<float> slice(out_p, num_elements);
if (!feature.ParseFloatList(&slice)) return parse_error();
if (slice.EndDistance() != 0) {
return shape_error(num_elements - slice.EndDistance(), "float");
}
break;
}
case tf::DT_STRING: {
auto& out_tensor = result->dense_tensors[d];
auto out_p = out_tensor.flat<tstring>().data() + offset;
LimitedArraySlice<tstring> slice(out_p, num_elements);
if (!feature.ParseBytesList(&slice)) return parse_error();
if (slice.EndDistance() != 0) {
return shape_error(num_elements - slice.EndDistance(), "bytes");
}
break;
}
default:
return tf::errors::Internal("Unrecognized dense type: ",
config.dense[d].dtype);
}
} else {
SparseBuffer& out = (*output_varlen_dense)[d];
const std::size_t num_elements = config.dense[d].elements_per_stride;
if (example_dtype != tf::DT_INVALID &&
example_dtype != config.dense[d].dtype) {
return example_error(absl::StrCat(
"Data types don't match. ",
"Expected type: ", DataTypeString(config.dense[d].dtype)));
}
auto shape_error = [&](size_t size, StringPiece type_str) {
return example_error(
absl::StrCat("Number of ", type_str,
" values is not a multiple of stride length. Saw ",
size, " values but output shape is: ",
config.dense[d].shape.DebugString()));
};
switch (config.dense[d].dtype) {
case tf::DT_INT64: {
if (example_dtype != tf::DT_INVALID) {
if (!feature.ParseInt64List(&out.int64_list)) {
return parse_error();
}
if (out.int64_list.size() % num_elements != 0) {
return shape_error(out.int64_list.size(), "int64");
}
}
out.example_end_indices.push_back(out.int64_list.size());
break;
}
case tf::DT_FLOAT: {
if (example_dtype != tf::DT_INVALID) {
if (!feature.ParseFloatList(&out.float_list)) {
return parse_error();
}
if (out.float_list.size() % num_elements != 0) {
return shape_error(out.float_list.size(), "float");
}
}
out.example_end_indices.push_back(out.float_list.size());
break;
}
case tf::DT_STRING: {
if (example_dtype != tf::DT_INVALID) {
if (!feature.ParseBytesList(&out.bytes_list)) {
return parse_error();
}
if (out.bytes_list.size() % num_elements != 0) {
return shape_error(out.bytes_list.size(), "byte");
}
}
out.example_end_indices.push_back(out.bytes_list.size());
break;
}
default:
return tf::errors::Internal("Should not happen: ",
config.dense[d].dtype);
}
}
} else {
auto& last_example = sparse_feature_last_example;
if (last_example[d] == example_index) {
continue;
}
last_example[d] = example_index;
SparseBuffer& out = (*output_sparse)[d];
tf::DataType feature_dtype = config.sparse[d].dtype;
if (example_dtype != tf::DT_INVALID && example_dtype != feature_dtype) {
return tf::errors::Internal("Data types don't match:", example_dtype,
" != ", feature_dtype);
}
switch (feature_dtype) {
case tf::DT_INT64: {
if (example_dtype != tf::DT_INVALID) {
if (!feature.ParseInt64List(&out.int64_list)) {
return parse_error();
}
}
out.example_end_indices.push_back(out.int64_list.size());
break;
}
case tf::DT_FLOAT: {
if (example_dtype != tf::DT_INVALID) {
if (!feature.ParseFloatList(&out.float_list)) {
return parse_error();
}
}
out.example_end_indices.push_back(out.float_list.size());
break;
}
case tf::DT_STRING: {
if (example_dtype != tf::DT_INVALID) {
if (!feature.ParseBytesList(&out.bytes_list)) {
return parse_error();
}
}
out.example_end_indices.push_back(out.bytes_list.size());
break;
}
default:
return tf::errors::Internal("Should not happen: ", feature_dtype);
}
}
}
for (size_t d = 0; d < config.dense.size(); ++d) {
if (config.dense[d].variable_length) continue;
if (dense_feature_last_example[d] == example_index) continue;
if (config.dense[d].default_value.NumElements() == 0) {
return tf::errors::Internal(
"Name: ", example_name, ", Feature: ", config.dense[d].feature_name,
" (data type: ", DataTypeString(config.dense[d].dtype), ")",
" is required but could not be found.");
}
const tf::Tensor& in = config.dense[d].default_value;
TfLiteTensor* out = result->dense_values[d];
const std::size_t num_elements = in.shape().num_elements();
const std::size_t offset = example_index * num_elements;
switch (config.dense[d].dtype) {
case tf::DT_INT64: {
std::copy_n(in.flat<int64_t>().data(), num_elements,
out->data.i64 + offset);
break;
}
case tf::DT_FLOAT: {
std::copy_n(in.flat<float>().data(), num_elements,
out->data.f + offset);
break;
}
case tf::DT_STRING: {
auto& out_tensor = result->dense_tensors[d];
std::copy_n(in.flat<tstring>().data(), num_elements,
out_tensor.flat<tstring>().data() + offset);
break;
}
default:
return tf::errors::Internal("Should not happen: ",
config.dense[d].dtype);
}
}
for (size_t d = 0; d < config.dense.size(); ++d) {
if (!config.dense[d].variable_length) continue;
if (dense_feature_last_example[d] == example_index) continue;
SparseBuffer& out = (*output_varlen_dense)[d];
size_t prev_example_end_index =
out.example_end_indices.empty() ? 0 : out.example_end_indices.back();
out.example_end_indices.push_back(prev_example_end_index);
}
for (size_t d = 0; d < config.sparse.size(); ++d) {
if (sparse_feature_last_example[d] == example_index) continue;
SparseBuffer& out = (*output_sparse)[d];
size_t prev_example_end_index =
out.example_end_indices.empty() ? 0 : out.example_end_indices.back();
out.example_end_indices.push_back(prev_example_end_index);
}
return absl::OkStatus();
}
void CountSparseFeatures(const SparseBuffer& sparse_buffer,
size_t* total_num_features, size_t* max_num_features) {
const std::vector<size_t>& end_indices = sparse_buffer.example_end_indices;
*total_num_features += end_indices.back();
*max_num_features = std::max(*max_num_features, end_indices[0]);
for (size_t i = 1; i < end_indices.size(); ++i) {
size_t example_size = end_indices[i] - end_indices[i - 1];
*max_num_features = std::max(*max_num_features, example_size);
}
}
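// Copies the accumulated sparse values into the output tensor; strings go
// through a DynamicBuffer to produce the TFLite string tensor layout.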
void CopySparseBufferToTensor(tf::DataType dtype, size_t offset,
SparseBuffer* src, TfLiteTensor* dst) {
switch (dtype) {
case tf::DT_INT64: {
std::copy(src->int64_list.begin(), src->int64_list.end(),
reinterpret_cast<int64_t*>(dst->data.raw) + offset);
break;
}
case tf::DT_FLOAT: {
std::copy(src->float_list.begin(), src->float_list.end(),
reinterpret_cast<float*>(dst->data.raw) + offset);
break;
}
case tf::DT_STRING: {
DynamicBuffer buffer;
for (auto* begin = src->bytes_list.begin();
begin != src->bytes_list.end(); begin++) {
buffer.AddString(begin->c_str(), begin->size());
}
buffer.WriteToTensor(dst, nullptr);
break;
}
default:
DCHECK(false) << "Encountered unexpected DataType "
<< DataTypeString(dtype)
<< "in variable that should have been checked.";
}
}
inline void CopyToBuffer(absl::Span<const tstring> vec, char* tensor_buffer,
int num_examples, int batch_size,
int elements_per_stride) {
int i = 0, k = 0;
int start = 0;
for (; i < num_examples; ++i) {
for (int j = 0; j < elements_per_stride; ++j) {
memcpy(tensor_buffer + start, vec[k].c_str(), vec[k].size());
start += vec[k].size();
k++;
}
}
for (; i < batch_size; ++i) {
for (int j = 0; j < elements_per_stride; ++j) {
memcpy(tensor_buffer + start, vec[k].c_str(), vec[k].size());
start += vec[k].size();
k++;
}
}
}
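// Parses every serialized Example in the batch and materializes the TFLite
// outputs: sparse indices/values/shapes are resized and filled from the
// sparse buffers, variable-length dense outputs are padded to the longest
// example, and string dense outputs are re-packed into the TFLite string
// tensor format.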
Status FastParseExampleLite(
const FastParseExampleConfig& config, const TfLiteTensor* serialized,
absl::Span<const tstring> example_names, bool* quick_filter,
int quick_filter_size, const std::unique_ptr<ConfigIndex>& config_index,
int config_index_size, SeededHasher* hasher, TfLiteResult* result,
std::map<absl::string_view, int>& stats, TfLiteContext* context) {
if (result == nullptr) {
return tf::errors::Internal("Result is null");
}
const int count = GetStringCount(serialized);
std::vector<tf::Tensor> fixed_dense_values(config.dense.size());
std::vector<SparseBuffer> sparse_buffers(config.sparse.size());
std::vector<SparseBuffer> varlen_dense_buffers(config.dense.size());
Status status_of_minibatch;
for (size_t e = 0; e < count; ++e) {
status_of_minibatch = FastParseSerializedExample(
GetString(serialized, e),
(!example_names.empty() ? example_names[e] : "<unknown>"), e, config,
quick_filter, quick_filter_size, config_index, config_index_size,
hasher, &result->dense_values, &varlen_dense_buffers, &sparse_buffers,
stats, result);
if (!status_of_minibatch.ok()) break;
}
if (!status_of_minibatch.ok()) {
return status_of_minibatch;
}
for (size_t d = 0; d < config.sparse.size(); ++d) {
size_t total_num_features = 0;
size_t max_num_features = 0;
CountSparseFeatures(sparse_buffers[d], &total_num_features,
&max_num_features);
tf::TensorShape indices_shape;
TfLiteTensor* indices = result->sparse_indices[d];
TfLiteTensor* values = result->sparse_values[d];
TfLiteTensor* sparse_shape = result->sparse_shapes[d];
auto* sparse_shape_ptr = reinterpret_cast<int64_t*>(sparse_shape->data.raw);
sparse_shape_ptr[1] = max_num_features;
TfLiteIntArray* index_shape = TfLiteIntArrayCreate(2);
index_shape->data[0] = total_num_features;
index_shape->data[1] = 2;
context->ResizeTensor(context, indices, index_shape);
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(1);
output_shape->data[0] = total_num_features;
context->ResizeTensor(context, values, output_shape);
SparseBuffer& buffer = sparse_buffers[d];
auto* indices_p = reinterpret_cast<int64_t*>(indices->data.raw);
if (!indices_p) {
return tf::errors::Internal("Indices tensor not allocated!");
}
if (total_num_features > 0) {
int64_t* ix_p = indices_p;
size_t example_index = 0;
int idx0 = 0;
size_t delta = 0;
for (size_t example_end_index : buffer.example_end_indices) {
size_t feature_index = 0;
for (; delta < example_end_index; ++delta) {
if (idx0 < total_num_features) {
*ix_p = example_index;
*(ix_p + 1) = feature_index;
ix_p += 2;
}
++feature_index;
++idx0;
}
++example_index;
}
CopySparseBufferToTensor(config.sparse[d].dtype, 0, &buffer, values);
}
}
for (size_t d = 0; d < config.dense.size(); ++d) {
if (!config.dense[d].variable_length) {
continue;
}
size_t max_num_features = 0;
std::vector<size_t>& end_indices =
varlen_dense_buffers[d].example_end_indices;
max_num_features = std::max(max_num_features, end_indices[0]);
for (size_t i = 1; i < end_indices.size(); ++i) {
size_t example_size = end_indices[i] - end_indices[i - 1];
max_num_features = std::max(max_num_features, example_size);
}
const size_t stride_size = config.dense[d].elements_per_stride;
const size_t max_num_elements = max_num_features / stride_size;
tf::TensorShape values_shape;
DCHECK_EQ(max_num_features % config.dense[d].elements_per_stride, 0);
const size_t batch_size = GetStringCount(serialized);
TF_RETURN_IF_ERROR(values_shape.AddDimWithStatus(batch_size));
TF_RETURN_IF_ERROR(values_shape.AddDimWithStatus(max_num_elements));
for (int i = 1; i < config.dense[d].shape.dims(); ++i) {
TF_RETURN_IF_ERROR(
values_shape.AddDimWithStatus(config.dense[d].shape.dim_size(i)));
}
TfLiteTensor* values = result->dense_values[d];
const size_t num_elements = GetTensorShape(values).FlatSize();
if (num_elements == 0) {
continue;
}
const size_t num_elements_per_minibatch = num_elements / batch_size;
switch (config.dense[d].dtype) {
case tf::DT_INT64: {
FillAndCopyVarLen<int64_t>(d, num_elements, num_elements_per_minibatch,
config, varlen_dense_buffers, values);
break;
}
case tf::DT_FLOAT: {
FillAndCopyVarLen<float>(d, num_elements, num_elements_per_minibatch,
config, varlen_dense_buffers, values);
break;
}
default:
DCHECK(false) << "Encountered unexpected DataType "
<< config.dense[d].dtype
<< "in variable that should have been checked";
}
}
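// Fixed-size string dense outputs: serialize the tstring tensor into the
// TFLite string buffer layout (an int32 string count, string-count + 1 int32
// offsets, then the concatenated bytes), reallocating the output buffer if it
// is too small.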
for (size_t d = 0; d < config.dense.size(); ++d) {
if (config.dense[d].variable_length) {
continue;
}
if (result->dense_values[d]->type == kTfLiteString) {
auto& in = result->dense_tensors[d];
auto vec = in.vec<tstring>();
const int batch_size = result->dense_values[d]->dims->data[0];
const int elements_per_stride = config.dense[d].elements_per_stride;
int total_size = 0;
std::vector<int32_t> offsets;
offsets.reserve(vec.size() + 1);
offsets.push_back(0);
int k = 0;
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < elements_per_stride; ++j) {
if (i < count) {
total_size += vec(k++).size();
offsets.push_back(total_size);
} else {
offsets.push_back(total_size);
}
}
}
const int32_t num_strings = offsets.size() - 1;
const size_t required_bytes = sizeof(int32_t) * (num_strings + 2) +
total_size;
char* tensor_buffer =
reinterpret_cast<char*>(result->dense_values[d]->data.raw);
if (result->dense_values[d]->bytes < required_bytes) {
if (result->dense_values[d]->data.raw) {
free(result->dense_values[d]->data.raw);
}
tensor_buffer = reinterpret_cast<char*>(malloc(required_bytes));
result->dense_values[d]->data.raw = tensor_buffer;
result->dense_values[d]->bytes = required_bytes;
}
const int32_t start = sizeof(int32_t) * (num_strings + 2);
memcpy(tensor_buffer, &num_strings, sizeof(int32_t));
for (size_t i = 0; i < offsets.size(); i++) {
int32_t offset_i = start + offsets[i];
memcpy(tensor_buffer + sizeof(int32_t) * (i + 1), &offset_i,
sizeof(int32_t));
}
absl::Span<const tstring> slice(vec.data(), vec.size());
CopyToBuffer(slice, tensor_buffer + start, count, batch_size,
elements_per_stride);
}
}
return absl::OkStatus();
}
}  // namespace
enum InputTensor {
kExampleTensor = 0,
kNamesTensor = 1,
kSparseKeysTensor = 2,
kDenseKeysTensor = 3,
kRaggedKeysTensor = 4,
};
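// Per-node state: the parse configuration, dense shapes, quick filter, and
// hashed feature index are built once on the first Eval and reused on
// subsequent invocations.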
struct OpData {
FastParseExampleConfig config;
std::vector<tf::TensorShape> dense_shapes;
int dense_size = 0;
int sparse_size = 0;
std::unique_ptr<ConfigIndex> config_index;
int config_index_size;
SeededHasher hasher;
TfLiteResult got;
bool* quick_filter = nullptr;
int quick_filter_size;
bool created = false;
~OpData() {
if (quick_filter) {
free(quick_filter);
}
}
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
return new OpData;
}
template <typename T>
tf::Tensor AsTensor(const std::vector<T>& val) {
tf::Tensor ret(tf::DataTypeToEnum<T>::value,
{static_cast<int64_t>(val.size())});
std::copy_n(val.begin(), val.size(), ret.flat<T>().data());
return ret;
}
enum Version {
V1,
V2,
};
tf::TensorShape TfLiteToTfShape(TfLiteIntArray* array) {
tf::TensorShape shape;
for (int i = 0; i < array->size; i++) {
shape.AddDim(array->data[i]);
}
return shape;
}
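// Reads the op configuration from the node's custom_initial_data flexbuffer
// (either a serialized NodeDef or a key/value map), records the number of
// dense and sparse features plus the dense shapes, and resizes the output
// tensors for the incoming batch size.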
template <Version version>
TfLiteStatus PrepareParseExample(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE(context, node->custom_initial_data);
data->config.dense.clear();
data->config.sparse.clear();
data->got.dense_values.clear();
const flexbuffers::Vector& v =
flexbuffers::GetRoot(
reinterpret_cast<const uint8_t*>(node->custom_initial_data),
node->custom_initial_data_size)
.AsVector();
if (v.size() == 2) {
tf::NodeDef nodedef;
TF_LITE_ENSURE_EQ(context, nodedef.ParseFromString(v[1].AsString().str()),
true);
if (version == V1) {
data->dense_size = nodedef.attr().at("Ndense").i();
data->sparse_size = nodedef.attr().at("Nsparse").i();
} else if (version == V2) {
data->dense_size = nodedef.attr().at("Tdense").list().type_size();
data->sparse_size = nodedef.attr().at("num_sparse").i();
}
auto dense_shapes = nodedef.attr().at("dense_shapes").list();
if (data->dense_shapes.empty()) {
for (int i = 0; i < dense_shapes.shape_size(); ++i) {
data->dense_shapes.push_back(dense_shapes.shape(i));
}
}
} else {
const flexbuffers::Map& m =
flexbuffers::GetRoot(
reinterpret_cast<const uint8_t*>(node->custom_initial_data),
node->custom_initial_data_size)
.AsMap();
const flexbuffers::TypedVector keys = m.Keys();
int num_sparse = 0;
int num_dense = 0;
for (int k = 0; k < keys.size(); ++k) {
const std::string key = keys[k].ToString();
const auto value = m[key];
if (key == "Nsparse" || key == "num_sparse") {
num_sparse = value.AsInt32();
}
if (key == "Ndense") {
num_dense = value.AsInt32();
}
}
data->sparse_size = num_sparse;
data->dense_size = num_dense;
if (version == V2) {
const TfLiteTensor* dense_key_tensor =
GetInput(context, node, kDenseKeysTensor);
data->dense_size = GetTensorShape(dense_key_tensor).FlatSize();
}
}
data->config.dense.reserve(data->dense_size);
data->config.sparse.reserve(data->sparse_size);
data->dense_shapes.reserve(data->dense_size);
const auto* serialized = GetInput(context, node, 0);
const int batch_size =
serialized->dims->size > 0 ? serialized->dims->data[0] : 1;
const bool missing_shape_info = data->dense_shapes.empty();
for (int i = 0; i < data->dense_size; i++) {
TfLiteTensor* dense_key_tensor =
GetOutput(context, node, data->sparse_size * 3 + i);
TfLiteIntArray* output_size = TfLiteIntArrayCopy(dense_key_tensor->dims);
if (missing_shape_info) {
data->dense_shapes.push_back(TfLiteToTfShape(output_size));
}
const int original_size = data->dense_shapes[i].dims() > 0
? data->dense_shapes[i].dim_size(0)
: 1;
output_size->data[0] = batch_size * original_size;
context->ResizeTensor(context, dense_key_tensor, output_size);
}
size_t offset = 0;
for (int i = 0; i < data->sparse_size; i++) {
auto* parse_output = GetOutput(context, node, i + offset);
SetTensorToDynamic(parse_output);
TfLiteIntArray* sparse_size = TfLiteIntArrayCreate(2);
sparse_size->data[0] = batch_size;
sparse_size->data[1] = 2;
context->ResizeTensor(context, parse_output, sparse_size);
data->got.sparse_indices.push_back(parse_output);
}
offset += data->sparse_size;
for (int i = 0; i < data->sparse_size; i++) {
auto* parse_output = GetOutput(context, node, i + offset);
SetTensorToDynamic(parse_output);
TfLiteIntArray* sparse_size = TfLiteIntArrayCreate(1);
sparse_size->data[0] = 0;
context->ResizeTensor(context, parse_output, sparse_size);
data->got.sparse_values.push_back(parse_output);
}
offset += data->sparse_size;
for (int i = 0; i < data->sparse_size; i++) {
TfLiteTensor* parse_output = GetOutput(context, node, i + offset);
SetTensorToDynamic(parse_output);
TfLiteIntArray* sparse_size = TfLiteIntArrayCreate(1);
sparse_size->data[0] = 2;
context->ResizeTensor(context, parse_output, sparse_size);
auto* shapes_shape_t = reinterpret_cast<int64_t*>(parse_output->data.i64);
shapes_shape_t[0] = batch_size;
shapes_shape_t[1] = 1;
data->got.sparse_shapes.push_back(parse_output);
}
data->created = false;
return kTfLiteOk;
}
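// On the first invocation, builds the FastParseExampleConfig from the key and
// default-value input tensors, the feature-name-length quick filter, and the
// presized cuckoo hash index (retrying with new hash seeds on collisions),
// then parses the serialized batch with FastParseExampleLite.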
template <Version version>
TfLiteStatus EvalParseExample(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
if (!data->created) {
for (int i = 0; i < data->sparse_size; i++) {
int input_index =
version == V1 ? kSparseKeysTensor + i : kSparseKeysTensor;
int string_index = version == V1 ? 0 : i;
const TfLiteTensor* sparse_key_tensor =
GetInput(context, node, input_index);
const auto key = GetString(sparse_key_tensor, string_index);
const auto* sparse_output =
GetOutput(context, node, i + data->sparse_size);
std::string k(key.str, key.len);
switch (sparse_output->type) {
case kTfLiteInt64:
data->config.sparse.emplace_back(k,
tf::DataTypeToEnum<int64_t>::value);
break;
case kTfLiteFloat32:
data->config.sparse.emplace_back(k, tf::DataTypeToEnum<float>::value);
break;
case kTfLiteString:
data->config.sparse.emplace_back(k,
tf::DataTypeToEnum<tstring>::value);
break;
default:
return kTfLiteError;
}
}
const auto& dense_shapes = data->dense_shapes;
for (int i = 0; i < data->dense_size; i++) {
const int input_index = version == V1
? kSparseKeysTensor + data->sparse_size + i
: kDenseKeysTensor;
const int dense_defaults_index =
version == V1
? kSparseKeysTensor + data->sparse_size + data->dense_size + i
: kRaggedKeysTensor + i + 1;
int string_index = version == V1 ? 0 : i;
const TfLiteTensor* dense_key_tensor =
GetInput(context, node, input_index);
const auto* dense_output =
GetOutput(context, node, i + data->sparse_size * 3);
const auto* dense_defaults =
GetInput(context, node, dense_defaults_index);
const auto key = GetString(dense_key_tensor, string_index);
std::string k(key.str, key.len);
const int elements_per_stride =
dense_shapes[i].dims() ? dense_shapes[i].num_elements() : 1;
switch (dense_output->type) {
case kTfLiteInt64:
data->config.dense.emplace_back(
k, tf::DataTypeToEnum<int64_t>::value, dense_shapes[i],
AsTensor<int64_t>(std::vector<int64_t>(
dense_defaults->data.i64,
dense_defaults->data.i64 + elements_per_stride)),
false, elements_per_stride);
break;
case kTfLiteFloat32:
data->config.dense.emplace_back(
k, tf::DataTypeToEnum<float>::value, dense_shapes[i],
AsTensor<float>(std::vector<float>(
dense_defaults->data.f,
dense_defaults->data.f + elements_per_stride)),
false, elements_per_stride);
break;
case kTfLiteString: {
const int num_strings = GetStringCount(dense_defaults);
std::vector<tstring> values;
for (int i = 0; i < num_strings; ++i) {
auto ref = GetString(dense_defaults, i);
values.emplace_back(ref.str, ref.len);
}
data->config.dense.emplace_back(
k, tf::DataTypeToEnum<tstring>::value, dense_shapes[i],
AsTensor<tstring>(values), false, elements_per_stride);
break;
}
default:
return kTfLiteError;
}
}
int offset = 3 * data->sparse_size;
for (int i = 0; i < data->dense_size; i++) {
auto* parse_output = GetOutput(context, node, i + offset);
data->got.dense_values.push_back(parse_output);
if (parse_output->type == kTfLiteString) {
tf::TensorShape shape;
if (parse_output->dims->size == 1) {
shape.AddDim(parse_output->dims->data[0]);
} else {
shape.AddDim(GetTensorShape(parse_output).FlatSize());
}
data->got.dense_tensors[i] =
tf::Tensor(tf::DataTypeToEnum<tstring>::value, shape);
}
}
size_t config_size = data->config.dense.size();
config_size += data->config.sparse.size();
data->config_index_size = config_size;
auto config_index = std::make_unique<ConfigIndex>(config_size);
bool ok = true;
int max_length = 0;
for (size_t d = 0; d < data->config.dense.size(); ++d) {
auto s = data->config.dense[d].feature_name;
max_length = s.length() > max_length ? s.length() : max_length;
}
for (size_t d = 0; d < data->config.sparse.size(); ++d) {
auto s = data->config.sparse[d].feature_name;
max_length = s.length() > max_length ? s.length() : max_length;
}
if (data->quick_filter) {
free(data->quick_filter);
}
data->quick_filter =
static_cast<bool*>(malloc(++max_length * sizeof(bool)));
memset(data->quick_filter, 0, max_length * sizeof(bool));
data->quick_filter_size = max_length;
for (size_t d = 0; d < data->config.dense.size(); ++d) {
const auto& s = data->config.dense[d].feature_name;
data->quick_filter[s.length()] = true;
}
for (size_t d = 0; d < data->config.sparse.size(); ++d) {
const auto& s = data->config.sparse[d].feature_name;
data->quick_filter[s.length()] = true;
}
for (int i = 0; i < 1000; ++i) {
for (size_t d = 0; d < data->config.dense.size(); ++d) {
ok &= config_index->InsertUnique(
data->hasher(data->config.dense[d].feature_name), {d, Type::Dense});
}
for (size_t d = 0; d < data->config.sparse.size(); ++d) {
ok &= config_index->InsertUnique(
data->hasher(data->config.sparse[d].feature_name),
{d, Type::Sparse});
}
if (ok) {
break;
}
data->hasher.seed++;
config_index->Clear(config_size);
ok = true;
}
if (!ok) {
return kTfLiteError;
}
data->config_index = std::move(config_index);
data->created = true;
}
const TfLiteTensor* serialized = GetInput(context, node, kExampleTensor);
std::map<absl::string_view, int> stats;
const auto status = FastParseExampleLite(
data->config, serialized, {}, data->quick_filter, data->quick_filter_size,
data->config_index, data->config_index_size, &data->hasher, &data->got,
stats, context);
if (status != absl::OkStatus()) {
    TF_LITE_KERNEL_LOG(context, "%s", status.ToString().c_str());
return kTfLiteError;
}
return kTfLiteOk;
}
void Free(TfLiteContext* context, void* buffer) {
auto* obj = reinterpret_cast<OpData*>(buffer);
delete obj;
}
}
TfLiteRegistration* Register_PARSE_EXAMPLE() {
static TfLiteRegistration r = {
parse_example::Init, parse_example::Free,
parse_example::PrepareParseExample<parse_example::V1>,
parse_example::EvalParseExample<parse_example::V1>};
return &r;
}
TfLiteRegistration* Register_PARSE_EXAMPLE_V2() {
static TfLiteRegistration r = {
parse_example::Init, parse_example::Free,
parse_example::PrepareParseExample<parse_example::V2>,
parse_example::EvalParseExample<parse_example::V2>};
return &r;
}
extern "C" void AddParseExampleOp(::tflite::MutableOpResolver* resolver) {
resolver->AddCustom("ParseExample", Register_PARSE_EXAMPLE());
resolver->AddCustom("ParseExampleV2", Register_PARSE_EXAMPLE_V2());
}
}
}
} | #include "tensorflow/lite/kernels/parse_example/parse_example.h"
#include <cstdint>
#include <initializer_list>
#include <string>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/core/example/feature_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace tf = ::tensorflow;
const char* kNodeDefTxt = R"pb(
name: "ParseExample/ParseExample"
op: "ParseExample"
input: "serialized"
input: "ParseExample/ParseExample/names"
input: "ParseExample/ParseExample/dense_keys_0"
input: "ParseExample/Const"
attr {
key: "Ndense"
value { i: 1 }
}
attr {
key: "Nsparse"
value { i: 0 }
}
attr {
key: "Tdense"
value { list { type: DT_FLOAT } }
}
attr {
key: "dense_shapes"
value { list { shape { dim { size: 2 } } } }
}
attr {
key: "sparse_types"
value { list { type: DT_FLOAT } }
}
)pb";
const char* kNodeDefTxt2 = R"pb(
name: "ParseExample/ParseExample"
op: "ParseExample"
input: "serialized"
input: "ParseExample/ParseExample/names"
input: "ParseExample/ParseExample/sparse_keys_0"
attr {
key: "Ndense"
value { i: 0 }
}
attr {
key: "Nsparse"
value { i: 1 }
}
attr {
key: "Tdense"
value {}
}
attr {
key: "dense_shapes"
value {}
}
attr {
key: "sparse_types"
value { list { type: DT_FLOAT } }
}
)pb";
const char* kNodeDefTxt3 = R"pb(
name: "ParseExample/ParseExample"
op: "ParseExample"
input: "serialized"
input: "ParseExample/ParseExample/names"
input: "ParseExample/ParseExample/sparse_keys_0"
attr {
key: "Ndense"
value { i: 1 }
}
attr {
key: "Nsparse"
value { i: 0 }
}
attr {
key: "Tdense"
value { list { type: DT_STRING } }
}
attr {
key: "dense_shapes"
value { list { shape { dim { size: 1 } } } }
}
attr {
key: "sparse_types"
value { list { type: DT_FLOAT } }
}
)pb";
const char* kNodeDefTxt4 = R"pb(
name: "ParseExample/ParseExample"
op: "ParseExample"
input: "serialized"
input: "ParseExample/ParseExample/names"
input: "ParseExample/ParseExample/sparse_keys_0"
attr {
key: "Ndense"
value { i: 0 }
}
attr {
key: "Nsparse"
value { i: 1 }
}
attr {
key: "Tdense"
value {}
}
attr {
key: "dense_shapes"
value {}
}
attr {
key: "sparse_types"
value { list { type: DT_STRING } }
}
)pb";
const char* kNodeDefTxt5 = R"pb(
name: "ParseExample/ParseExample"
op: "ParseExample"
input: "serialized"
input: "ParseExample/ParseExample/names"
input: "ParseExample/ParseExample/dense_keys_0"
input: "ParseExample/Const"
attr {
key: "Ndense"
value { i: 1 }
}
attr {
key: "Nsparse"
value { i: 0 }
}
attr {
key: "Tdense"
value { list { type: DT_FLOAT } }
}
attr {
key: "dense_shapes"
value {}
}
attr {
key: "sparse_types"
value { list { type: DT_FLOAT } }
}
)pb";
template <typename DefaultType>
class ParseExampleOpModel : public SingleOpModel {
public:
ParseExampleOpModel(std::vector<std::string> serialized_examples,
std::vector<std::string> sparse_keys,
std::vector<std::string> dense_keys,
std::initializer_list<DefaultType> dense_defaults,
std::vector<TensorType> dense_types,
std::vector<TensorType> sparse_types,
const char* text_def, int dense_size = 2) {
const int input_size = serialized_examples.size();
auto input_tensor_data = TensorData(TensorType_STRING, {input_size});
string_indices_.push_back(AddInput(input_tensor_data));
string_indices_.push_back(
AddConstInput<std::string>(TensorData(TensorType_STRING, {0}), {""}));
std::for_each(sparse_keys.begin(), sparse_keys.end(), [&](auto&&) {
string_indices_.push_back(AddInput(TensorData(TensorType_STRING, {1})));
});
std::for_each(dense_keys.begin(), dense_keys.end(), [&](auto&&) {
string_indices_.push_back(AddInput(TensorData(TensorType_STRING, {1})));
});
if (dense_size > 0) {
dense_defaults_ = AddConstInput<DefaultType>(
TensorData(dense_types[0], {dense_size}), dense_defaults);
}
if (!sparse_keys.empty()) {
for (int i = 0; i < sparse_keys.size(); i++) {
sparse_indices_outputs_.push_back(AddOutput(TensorType_INT64));
}
for (int i = 0; i < sparse_keys.size(); i++) {
sparse_values_outputs_.push_back(AddOutput(sparse_types[i]));
}
for (int i = 0; i < sparse_keys.size(); i++) {
sparse_shapes_outputs_.push_back(AddOutput({TensorType_INT64, {2}}));
}
}
for (int i = 0; i < dense_keys.size(); i++) {
dense_outputs_.push_back(AddOutput({dense_types[i], {dense_size}}));
}
tf::NodeDef nodedef;
tf::protobuf::TextFormat::Parser parser;
tf::protobuf::io::ArrayInputStream input_stream(text_def, strlen(text_def));
if (!parser.Parse(&input_stream, &nodedef)) {
abort();
}
std::string serialized_nodedef;
nodedef.SerializeToString(&serialized_nodedef);
flexbuffers::Builder fbb;
fbb.Vector([&]() {
fbb.String(nodedef.op());
fbb.String(serialized_nodedef);
});
fbb.Finish();
const auto buffer = fbb.GetBuffer();
SetCustomOp("ParseExample", buffer, Register_PARSE_EXAMPLE);
BuildInterpreter({{input_size}});
int idx = 0;
PopulateStringTensor(string_indices_[idx++], serialized_examples);
PopulateStringTensor(string_indices_[idx++], {""});
for (const auto& key : sparse_keys) {
PopulateStringTensor(string_indices_[idx++], {key});
}
for (const auto& key : dense_keys) {
PopulateStringTensor(string_indices_[idx++], {key});
}
}
void ResizeInputTensor(std::vector<std::vector<int>> input_shapes) {
for (size_t i = 0; i < input_shapes.size(); ++i) {
const int input_idx = interpreter_->inputs()[i];
if (input_idx == kTfLiteOptionalTensor) continue;
const auto& shape = input_shapes[i];
if (shape.empty()) continue;
CHECK(interpreter_->ResizeInputTensor(input_idx, shape) == kTfLiteOk);
}
}
template <typename T>
std::vector<T> GetSparseIndicesOutput(int i) {
return ExtractVector<T>(sparse_indices_outputs_[i]);
}
template <typename T>
std::vector<T> GetSparseValuesOutput(int i) {
return ExtractVector<T>(sparse_values_outputs_[i]);
}
template <typename T>
std::vector<T> GetSparseShapesOutput(int i) {
return ExtractVector<T>(sparse_shapes_outputs_[i]);
}
template <typename T>
std::vector<T> GetDenseOutput(int i) {
return ExtractVector<T>(dense_outputs_[i]);
}
std::vector<std::string> GetStringOutput(int i) {
auto* t = interpreter_->tensor(i);
int count = GetStringCount(t);
std::vector<std::string> v;
for (int i = 0; i < count; ++i) {
auto ref = GetString(t, i);
v.emplace_back(ref.str, ref.len);
}
return v;
}
int DenseDefaults() { return dense_defaults_; }
int SparseValuesOutputs(int i) { return sparse_values_outputs_[i]; }
int DenseOutputs(int i) { return dense_outputs_[i]; }
std::vector<int> dense_outputs_;
std::vector<int> sparse_indices_outputs_;
std::vector<int> sparse_shapes_outputs_;
std::vector<int> sparse_values_outputs_;
std::vector<int> string_indices_;
int dense_defaults_ = -1;
};
TEST(ParseExampleOpsTest, SimpleTest) {
tf::Example example;
tf::AppendFeatureValues<float>({1.5f, 1.5f}, "time", &example);
tf::AppendFeatureValues<float>({1.0f, 1.0f}, "num", &example);
ParseExampleOpModel<float> m({example.SerializeAsString()}, {}, {"time"},
{0.f, 0.f}, {TensorType_FLOAT32}, {},
kNodeDefTxt);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDenseOutput<float>(0),
ElementsAreArray(ArrayFloatNear({1.5f, 1.5f})));
}
TEST(ParseExampleOpsTest, SparseTest) {
tf::Example example;
tf::AppendFeatureValues<float>({1.5f}, "time", &example);
ParseExampleOpModel<float> m({example.SerializeAsString()}, {"time"}, {}, {},
{}, {TensorType_FLOAT32}, kNodeDefTxt2, 0);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetSparseIndicesOutput<int64_t>(0),
ElementsAreArray(ArrayFloatNear({0, 0})));
EXPECT_THAT(m.GetSparseValuesOutput<float>(0),
ElementsAreArray(ArrayFloatNear({1.5f})));
EXPECT_THAT(m.GetSparseShapesOutput<int64_t>(0),
ElementsAreArray(ArrayFloatNear({1, 1})));
}
TEST(ParseExampleOpsTest, SimpleBytesTest) {
tf::Example example;
const std::string test_data = "simpletest";
tf::AppendFeatureValues<tensorflow::tstring>({test_data}, "time", &example);
tf::AppendFeatureValues<float>({1.0f, 1.0f}, "num", &example);
std::string default_value = "missing";
ParseExampleOpModel<std::string> m({example.SerializeAsString()}, {},
{"time"}, {default_value},
{TensorType_STRING}, {}, kNodeDefTxt3, 1);
m.PopulateStringTensor(m.DenseDefaults(), {default_value});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<string> c = m.GetStringOutput(m.DenseOutputs(0));
EXPECT_EQ(1, c.size());
EXPECT_EQ(test_data, c[0]);
}
TEST(ParseExampleOpsTest, SparseBytesTest) {
tf::Example example;
const std::string test_data = "simpletest";
tf::AppendFeatureValues<tensorflow::tstring>({test_data, test_data}, "time",
&example);
tf::AppendFeatureValues<float>({1.0f, 1.0f}, "num", &example);
ParseExampleOpModel<std::string> m({example.SerializeAsString()}, {"time"},
{}, {}, {}, {TensorType_STRING},
kNodeDefTxt4, 0);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetSparseIndicesOutput<int64_t>(0),
testing::ElementsAreArray({0, 0, 0, 1}));
auto values = m.GetStringOutput(m.SparseValuesOutputs(0));
EXPECT_EQ(2, values.size());
EXPECT_EQ(test_data, values[0]);
EXPECT_EQ(test_data, values[1]);
EXPECT_THAT(m.GetSparseShapesOutput<int64_t>(0),
testing::ElementsAreArray({1, 2}));
}
TEST(ParseExampleOpsTest, ResizeTest) {
const int num_tests = 3;
std::vector<tf::Example> examples(num_tests);
std::vector<std::vector<float>> expected(num_tests);
std::vector<std::vector<std::string>> inputs(num_tests);
std::vector<int> sizes;
for (int i = 0; i < num_tests; ++i) {
float val = i;
std::initializer_list<float> floats = {val + val / 10.f, -val - val / 10.f};
tf::AppendFeatureValues<float>({val, val}, "num", &examples[i]);
tf::AppendFeatureValues<float>(floats, "time", &examples[i]);
sizes.push_back((num_tests - i) * 2);
for (int j = 0; j < sizes.back(); ++j) {
inputs[i].push_back(examples[i].SerializeAsString());
expected[i].insert(expected[i].end(), floats.begin(), floats.end());
}
}
ParseExampleOpModel<float> m(inputs[0], {}, {"time"}, {0.f, 0.f},
{TensorType_FLOAT32}, {}, kNodeDefTxt);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDenseOutput<float>(0),
ElementsAreArray(ArrayFloatNear(expected[0])));
for (int i = 1; i < num_tests; ++i) {
m.ResizeInputTensor({{sizes[i]}});
m.AllocateAndDelegate(false);
m.PopulateStringTensor(0, inputs[i]);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDenseOutput<float>(0),
ElementsAreArray(ArrayFloatNear(expected[i])));
}
}
TEST(ParseExampleOpsTest, ResizeMissingInfoTest) {
const int num_tests = 3;
std::vector<tf::Example> examples(num_tests);
std::vector<std::vector<float>> expected(num_tests);
std::vector<std::vector<std::string>> inputs(num_tests);
std::vector<int> sizes;
for (int i = 0; i < num_tests; ++i) {
float val = i;
std::initializer_list<float> floats = {val + val / 10.f, -val - val / 10.f};
tf::AppendFeatureValues<float>({val, val}, "num", &examples[i]);
tf::AppendFeatureValues<float>(floats, "time", &examples[i]);
sizes.push_back((num_tests - i) * 2);
for (int j = 0; j < sizes.back(); ++j) {
inputs[i].push_back(examples[i].SerializeAsString());
expected[i].insert(expected[i].end(), floats.begin(), floats.end());
}
}
ParseExampleOpModel<float> m(inputs[0], {}, {"time"}, {0.f, 0.f},
{TensorType_FLOAT32}, {}, kNodeDefTxt5);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDenseOutput<float>(0),
ElementsAreArray(ArrayFloatNear(expected[0])));
for (int i = 1; i < num_tests; ++i) {
m.ResizeInputTensor({{sizes[i]}});
m.AllocateAndDelegate(false);
m.PopulateStringTensor(0, inputs[i]);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDenseOutput<float>(0),
ElementsAreArray(ArrayFloatNear(expected[i])));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/parse_example/parse_example.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/parse_example/parse_example_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
905c7aba-44b7-45a2-bde2-d929d4789674 | cpp | google/quiche | hpack_string_decoder | quiche/http2/hpack/decoder/hpack_string_decoder.cc | quiche/http2/hpack/decoder/hpack_string_decoder_test.cc | #include "quiche/http2/hpack/decoder/hpack_string_decoder.h"
#include <ostream>
#include <string>
#include "absl/strings/str_cat.h"
namespace http2 {
std::string HpackStringDecoder::DebugString() const {
return absl::StrCat("HpackStringDecoder(state=", StateToString(state_),
", length=", length_decoder_.DebugString(),
", remaining=", remaining_,
", huffman=", huffman_encoded_ ? "true)" : "false)");
}
std::string HpackStringDecoder::StateToString(StringDecoderState v) {
switch (v) {
case kStartDecodingLength:
return "kStartDecodingLength";
case kDecodingString:
return "kDecodingString";
case kResumeDecodingLength:
return "kResumeDecodingLength";
}
return absl::StrCat("UNKNOWN_STATE(", static_cast<uint32_t>(v), ")");
}
std::ostream& operator<<(std::ostream& out, const HpackStringDecoder& v) {
return out << v.DebugString();
}
} | #include "quiche/http2/hpack/decoder/hpack_string_decoder.h"
#include <string>
#include "absl/strings/string_view.h"
#include "quiche/http2/hpack/decoder/hpack_string_decoder_listener.h"
#include "quiche/http2/test_tools/hpack_block_builder.h"
#include "quiche/http2/test_tools/hpack_string_collector.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/http2/test_tools/random_decoder_test_base.h"
#include "quiche/http2/test_tools/verify_macros.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
namespace {
const bool kMayReturnZeroOnFirst = false;
const bool kCompressed = true;
const bool kUncompressed = false;
class HpackStringDecoderTest : public RandomDecoderTest {
protected:
HpackStringDecoderTest() : listener_(&collector_) {}
DecodeStatus StartDecoding(DecodeBuffer* b) override {
++start_decoding_calls_;
collector_.Clear();
return decoder_.Start(b, &listener_);
}
DecodeStatus ResumeDecoding(DecodeBuffer* b) override {
QUICHE_VLOG(1) << decoder_.DebugString();
QUICHE_VLOG(2) << collector_;
return decoder_.Resume(b, &listener_);
}
AssertionResult Collected(absl::string_view s, bool huffman_encoded) {
QUICHE_VLOG(1) << collector_;
return collector_.Collected(s, huffman_encoded);
}
Validator MakeValidator(const std::string& expected_str,
bool expected_huffman) {
return [expected_str, expected_huffman, this](
const DecodeBuffer& ,
DecodeStatus ) -> AssertionResult {
AssertionResult result = Collected(expected_str, expected_huffman);
if (result) {
HTTP2_VERIFY_EQ(collector_,
HpackStringCollector(expected_str, expected_huffman));
} else {
HTTP2_VERIFY_NE(collector_,
HpackStringCollector(expected_str, expected_huffman));
}
QUICHE_VLOG(2) << collector_.ToString();
collector_.Clear();
QUICHE_VLOG(2) << collector_;
return result;
};
}
HpackStringDecoder decoder_;
HpackStringCollector collector_;
HpackStringDecoderVLoggingListener listener_;
size_t start_decoding_calls_ = 0;
};
TEST_F(HpackStringDecoderTest, DecodeEmptyString) {
{
Validator validator = ValidateDoneAndEmpty(MakeValidator("", kCompressed));
const char kData[] = {'\x80'};
DecodeBuffer b(kData);
EXPECT_TRUE(
DecodeAndValidateSeveralWays(&b, kMayReturnZeroOnFirst, validator));
}
{
Validator validator =
ValidateDoneAndOffset(1, MakeValidator("", kUncompressed));
const char kData[] = {'\x00', '\xff'};
DecodeBuffer b(kData);
EXPECT_EQ(2u, b.Remaining());
EXPECT_TRUE(
DecodeAndValidateSeveralWays(&b, kMayReturnZeroOnFirst, validator));
EXPECT_EQ(1u, b.Remaining());
}
}
TEST_F(HpackStringDecoderTest, DecodeShortString) {
{
Validator validator =
ValidateDoneAndOffset(11, MakeValidator("start end.", kCompressed));
const char kData[] = "\x8astart end.Don't peek at this.";
DecodeBuffer b(kData);
EXPECT_TRUE(
DecodeAndValidateSeveralWays(&b, kMayReturnZeroOnFirst, validator));
}
{
Validator validator =
ValidateDoneAndOffset(11, MakeValidator("start end.", kUncompressed));
absl::string_view data("\x0astart end.");
DecodeBuffer b(data);
EXPECT_TRUE(
DecodeAndValidateSeveralWays(&b, kMayReturnZeroOnFirst, validator));
}
}
TEST_F(HpackStringDecoderTest, DecodeLongStrings) {
std::string name = Random().RandString(1024);
std::string value = Random().RandString(65536);
HpackBlockBuilder hbb;
hbb.AppendString(false, name);
uint32_t offset_after_name = hbb.size();
EXPECT_EQ(3 + name.size(), offset_after_name);
hbb.AppendString(true, value);
uint32_t offset_after_value = hbb.size();
EXPECT_EQ(3 + name.size() + 4 + value.size(), offset_after_value);
DecodeBuffer b(hbb.buffer());
EXPECT_TRUE(DecodeAndValidateSeveralWays(
&b, kMayReturnZeroOnFirst,
ValidateDoneAndOffset(offset_after_name,
MakeValidator(name, kUncompressed))));
EXPECT_EQ(offset_after_name, b.Offset());
EXPECT_EQ(offset_after_value - offset_after_name, b.Remaining());
EXPECT_TRUE(DecodeAndValidateSeveralWays(
&b, kMayReturnZeroOnFirst,
ValidateDoneAndOffset(offset_after_value - offset_after_name,
MakeValidator(value, kCompressed))));
EXPECT_EQ(offset_after_value, b.Offset());
EXPECT_EQ(0u, b.Remaining());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/decoder/hpack_string_decoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/decoder/hpack_string_decoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
08971125-e01f-4a75-81b3-81a1c6420689 | cpp | google/tensorstore | index_transform_builder | tensorstore/index_space/index_transform_builder.cc | tensorstore/index_space/index_transform_builder_test.cc | #include "tensorstore/index_space/index_transform_builder.h"
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/internal/transform_rep_impl.h"
#include "tensorstore/internal/dimension_labels.h"
#include "tensorstore/internal/integer_overflow.h"
namespace tensorstore {
namespace internal_index_space {
void InitializeTransformRepForBuilder(TransformRep* data) {
assert(data != nullptr);
const DimensionIndex output_rank = data->output_rank;
span<OutputIndexMap> maps = data->output_index_maps().first(output_rank);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
auto& map = maps[output_dim];
map.stride() = 0;
map.offset() = 0;
}
}
absl::Status SetOutputIndexMapsAndValidateTransformRep(
TransformRep* data, span<const OutputIndexMapInitializer> output_index_maps,
IntervalForm interval_form, BuilderFlags flags) {
const DimensionIndex input_rank = data->input_rank;
const DimensionIndex output_rank = data->output_rank;
assert(output_index_maps.size() == output_rank);
span<Index> input_origin = data->input_origin().first(input_rank);
span<Index> input_shape = data->input_shape().first(input_rank);
auto& implicit_lower_bounds = data->implicit_lower_bounds;
auto& implicit_upper_bounds = data->implicit_upper_bounds;
const auto implicit_mask = DimensionSet::UpTo(input_rank);
if ((flags & BuilderFlags::kSetLower) == BuilderFlags::kDefault) {
Index val =
(interval_form == IntervalForm::sized) &&
((flags & BuilderFlags::kSetUpper) == BuilderFlags::kSetUpper)
? 0
: -kInfIndex;
std::fill(input_origin.begin(), input_origin.end(), val);
}
if ((flags & BuilderFlags::kSetUpper) == BuilderFlags::kDefault) {
interval_form = IntervalForm::half_open;
std::fill(input_shape.begin(), input_shape.end(), kInfIndex + 1);
}
if ((flags & BuilderFlags::kSetImplicitLower) == BuilderFlags::kDefault) {
implicit_lower_bounds =
((flags & BuilderFlags::kSetLower) == BuilderFlags::kDefault) &&
interval_form != IntervalForm::sized;
}
if ((flags & BuilderFlags::kSetImplicitUpper) == BuilderFlags::kDefault) {
implicit_upper_bounds =
(flags & BuilderFlags::kSetUpper) == BuilderFlags::kDefault;
}
implicit_lower_bounds &= implicit_mask;
implicit_upper_bounds &= implicit_mask;
TENSORSTORE_RETURN_IF_ERROR(internal::ValidateDimensionLabelsAreUnique(
data->input_labels().first(input_rank)));
span<OutputIndexMap> maps = data->output_index_maps().first(output_rank);
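  // Normalize the input domain: convert sized/closed/half-open bounds into the
  // canonical origin + size representation, validating each interval.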
switch (interval_form) {
case IntervalForm::sized:
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
Index& size = input_shape[input_dim];
if (size == kInfSize) {
size = kInfIndex + 1 - input_origin[input_dim];
}
TENSORSTORE_RETURN_IF_ERROR(
IndexInterval::Sized(input_origin[input_dim], size));
}
break;
case IntervalForm::closed:
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto interval, IndexInterval::Closed(input_origin[input_dim],
input_shape[input_dim]));
input_shape[input_dim] = interval.size();
}
break;
case IntervalForm::half_open:
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto interval, IndexInterval::HalfOpen(input_origin[input_dim],
input_shape[input_dim]));
input_shape[input_dim] = interval.size();
}
break;
default:
ABSL_UNREACHABLE();
}
const bool domain_is_explicitly_empty = IsDomainExplicitlyEmpty(data);
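  // Install each output index map: index-array maps are validated against the
  // explicit, finite input domain (and collapse to constants when the domain
  // is explicitly empty), single-input-dimension maps are checked against the
  // input rank, and anything else becomes a constant map.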
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const auto& initializer = output_index_maps[output_dim];
auto& map = maps[output_dim];
if (initializer.index_array.valid()) {
TENSORSTORE_RETURN_IF_ERROR(initializer.index_array_bounds);
span<const Index> shape = initializer.index_array.shape();
const Index* byte_strides = initializer.index_array.byte_strides().data();
if (shape.size() != input_rank) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Index array for output dimension ", output_dim, " has rank ",
shape.size(), " but must have rank ", input_rank));
}
auto& index_array_data = map.SetArrayIndexing(shape.size());
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
const Index array_dim_size = shape[input_dim];
if (array_dim_size == 1) {
index_array_data.byte_strides[input_dim] = 0;
continue;
}
const Index input_size = input_shape[input_dim];
if (array_dim_size != input_size) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Index array for output dimension ", output_dim, " has shape ",
shape, " which does not match input_shape ", input_shape));
}
if (byte_strides[input_dim] == 0 && array_dim_size != 0) {
index_array_data.byte_strides[input_dim] = 0;
continue;
}
if (implicit_lower_bounds[input_dim] ||
implicit_upper_bounds[input_dim]) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Index array for output dimension ",
output_dim, " depends on input dimension ",
input_dim, " with implicit bounds"));
}
if (!IsFinite(IndexInterval::UncheckedSized(input_origin[input_dim],
input_size))) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Index array for output dimension ",
output_dim, " depends on input dimension ",
input_dim, " with infinite bounds"));
}
index_array_data.byte_strides[input_dim] = byte_strides[input_dim];
}
if (domain_is_explicitly_empty) {
map.SetConstant();
map.offset() = 0;
map.stride() = 0;
} else {
index_array_data.index_range = *initializer.index_array_bounds;
index_array_data.element_pointer = AddByteOffset(
initializer.index_array.element_pointer(),
internal::wrap_on_overflow::Subtract(
initializer.index_array.layout().origin_byte_offset(),
IndexInnerProduct(input_rank, input_origin.data(),
index_array_data.byte_strides)));
}
} else if (initializer.input_dimension) {
const DimensionIndex input_dim = *initializer.input_dimension;
if (input_dim < 0 || input_dim >= input_rank) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Input dimension ", input_dim, " specified for output dimension ",
output_dim, " is outside valid range [0, ", input_rank, ")"));
}
if (map.stride() == 0) {
map.SetConstant();
} else {
map.SetSingleInputDimension(input_dim);
}
} else {
map.SetConstant();
map.stride() = 0;
}
}
internal_index_space::DebugCheckInvariants(data);
return absl::OkStatus();
}
}
} | #include "tensorstore/index_space/index_transform_builder.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DimensionSet;
using ::tensorstore::Index;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::OutputIndexMethod;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::TransformAccess;
TEST(IndexTransformTest, BuilderValid) {
auto index_array = MakeArray<Index>({{{1, 0, 2, 2}}});
auto t =
IndexTransformBuilder<3, 4>()
.input_origin({1, 2, 3})
.input_shape({2, 2, 4})
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({1, 0, 0})
.input_labels({"x", "y", "z"})
.output_constant(0, 4)
.output_single_input_dimension(1, 5, 7, 2)
.output_constant(2, 6)
.output_index_array(3, 7, 9, index_array, IndexInterval::Closed(0, 3))
.Finalize()
.value();
static_assert(std::is_same_v<decltype(t), IndexTransform<3, 4>>);
EXPECT_THAT(t.input_origin(), ::testing::ElementsAre(1, 2, 3));
EXPECT_THAT(t.input_shape(), ::testing::ElementsAre(2, 2, 4));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("x", "y", "z"));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({0, 1, 0}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({1, 0, 0}));
EXPECT_EQ(IndexInterval::UncheckedSized(1, 2),
t.input_domain()[0].interval());
EXPECT_EQ(IndexInterval::UncheckedSized(2, 2),
t.input_domain()[1].interval());
EXPECT_EQ(IndexInterval::UncheckedSized(3, 4),
t.input_domain()[2].interval());
{
auto map = t.output_index_map(0);
EXPECT_EQ(OutputIndexMethod::constant, map.method());
EXPECT_EQ(4, map.offset());
EXPECT_EQ(0, map.stride());
}
{
auto map = t.output_index_map(1);
EXPECT_EQ(OutputIndexMethod::single_input_dimension, map.method());
EXPECT_EQ(2, map.input_dimension());
EXPECT_EQ(5, map.offset());
EXPECT_EQ(7, map.stride());
}
{
auto map = t.output_index_map(2);
EXPECT_EQ(OutputIndexMethod::constant, map.method());
EXPECT_EQ(6, map.offset());
EXPECT_EQ(0, map.stride());
}
{
auto map = t.output_index_map(3);
EXPECT_EQ(OutputIndexMethod::array, map.method());
EXPECT_EQ(7, map.offset());
EXPECT_EQ(9, map.stride());
auto index_array_ref = map.index_array();
EXPECT_EQ(&index_array(0, 0, 0), &index_array_ref.array_ref()(1, 2, 3));
EXPECT_THAT(index_array_ref.layout().byte_strides(),
::testing::ElementsAre(0, 0, sizeof(Index)));
}
{
std::array<Index, 4> output_indices;
ASSERT_EQ(
absl::OkStatus(),
t.TransformIndices(span<const Index, 3>({1, 2, 3}), output_indices));
EXPECT_THAT(output_indices, ::testing::ElementsAre(4, 26, 6, 16));
}
}
TEST(IndexTransformBuilderTest, Nullptr) {
IndexTransformBuilder<> builder(nullptr);
EXPECT_FALSE(builder.valid());
{
IndexTransformBuilder<> other_builder(builder);
EXPECT_FALSE(other_builder.valid());
}
{
IndexTransformBuilder<> other_builder(nullptr);
other_builder = builder;
EXPECT_FALSE(other_builder.valid());
}
}
TEST(IndexTransformBuilderTest, Move) {
IndexTransformBuilder<> builder(1, 1);
EXPECT_TRUE(builder.valid());
builder.input_origin({1});
auto builder2 = std::move(builder);
EXPECT_TRUE(builder2.valid());
EXPECT_FALSE(builder.valid());
builder2.output_constant(0, 5);
EXPECT_THAT(builder2.Finalize().value(), IndexTransformBuilder<>(1, 1)
.input_origin({1})
.output_constant(0, 5)
.Finalize()
.value());
}
TEST(IndexTransformBuilderTest, Copy) {
IndexTransformBuilder<> builder(1, 1);
EXPECT_TRUE(builder.valid());
builder.input_origin({1});
auto builder2 = builder;
EXPECT_TRUE(builder.valid());
EXPECT_TRUE(builder2.valid());
builder.output_constant(0, 4);
builder2.output_constant(0, 5);
EXPECT_THAT(builder.Finalize().value(), IndexTransformBuilder<>(1, 1)
.input_origin({1})
.output_constant(0, 4)
.Finalize()
.value());
EXPECT_THAT(builder2.Finalize().value(), IndexTransformBuilder<>(1, 1)
.input_origin({1})
.output_constant(0, 5)
.Finalize()
.value());
}
TEST(IndexTransformBuilderTest, Default) {
auto t = IndexTransformBuilder<>(2, 1).Finalize().value();
EXPECT_THAT(t.input_origin(), ::testing::ElementsAre(-kInfIndex, -kInfIndex));
EXPECT_THAT(t.input_shape(), ::testing::ElementsAre(kInfSize, kInfSize));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({1, 1}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({1, 1}));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("", ""));
auto map = t.output_index_map(0);
EXPECT_EQ(0, map.offset());
EXPECT_EQ(0, map.stride());
EXPECT_EQ(OutputIndexMethod::constant, map.method());
}
TEST(IndexTransformBuilderTest, InputOriginSpecified) {
auto t =
IndexTransformBuilder<>(2, 0).input_origin({1, 2}).Finalize().value();
EXPECT_EQ(t.domain()[0].interval(),
IndexInterval::UncheckedClosed(1, kInfIndex));
EXPECT_EQ(t.domain()[1].interval(),
IndexInterval::UncheckedClosed(2, kInfIndex));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({0, 0}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({1, 1}));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("", ""));
}
TEST(IndexTransformBuilderTest, ImplicitLowerBoundsSpecified) {
auto t = IndexTransformBuilder<>(2, 0)
.implicit_lower_bounds({1, 0})
.Finalize()
.value();
EXPECT_EQ(t.domain()[0].interval(),
IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex));
EXPECT_EQ(t.domain()[1].interval(),
IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({1, 0}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({1, 1}));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("", ""));
}
TEST(IndexTransformBuilderTest, InputShapeSpecified) {
auto t =
IndexTransformBuilder<>(2, 0).input_shape({5, 10}).Finalize().value();
EXPECT_EQ(t.domain()[0].interval(), IndexInterval::UncheckedSized(0, 5));
EXPECT_EQ(t.domain()[1].interval(), IndexInterval::UncheckedSized(0, 10));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({0, 0}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({0, 0}));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("", ""));
}
TEST(IndexTransformBuilderTest, InputInclusiveMaxSpecified) {
auto t = IndexTransformBuilder<>(2, 0)
.input_inclusive_max({5, 10})
.Finalize()
.value();
EXPECT_EQ(t.domain()[0].interval(),
IndexInterval::UncheckedClosed(-kInfIndex, 5));
EXPECT_EQ(t.domain()[1].interval(),
IndexInterval::UncheckedClosed(-kInfIndex, 10));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({1, 1}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({0, 0}));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("", ""));
}
TEST(IndexTransformBuilderTest, InputExclusiveMaxSpecified) {
auto t = IndexTransformBuilder<>(2, 0)
.input_exclusive_max({5, 10})
.Finalize()
.value();
EXPECT_EQ(t.domain()[0].interval(),
IndexInterval::UncheckedHalfOpen(-kInfIndex, 5));
EXPECT_EQ(t.domain()[1].interval(),
IndexInterval::UncheckedHalfOpen(-kInfIndex, 10));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({1, 1}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({0, 0}));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("", ""));
}
TEST(IndexTransformBuilderTest, ImplicitUpperBoundsSpecified) {
auto t = IndexTransformBuilder<>(2, 0)
.implicit_upper_bounds({1, 0})
.Finalize()
.value();
EXPECT_EQ(t.domain()[0].interval(),
IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex));
EXPECT_EQ(t.domain()[1].interval(),
IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({1, 1}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({1, 0}));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("", ""));
}
TEST(IndexTransformBuilderTest, SingleInputDimensionDefaults) {
EXPECT_EQ(IndexTransformBuilder<>(3, 1)
.output_single_input_dimension(0, 2)
.Finalize()
.value(),
IndexTransformBuilder<>(3, 1)
.output_single_input_dimension(0, 2)
.Finalize()
.value());
}
TEST(IndexTransformBuilderTest, InputOriginOutOfRange) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1)
.input_origin({-kInfIndex - 1, -kInfIndex})
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".* do not specify a valid half-open index interval"));
}
TEST(IndexTransformBuilderTest, InputShapeOutOfRange) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1).input_shape({1, -1}).Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"\\(0, -1\\) do not specify a valid sized index interval"));
}
TEST(IndexTransformBuilderTest, InvalidInputDimensionNegative) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1)
.output_single_input_dimension(0, 0, 1, -1)
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Input dimension -1 specified for output dimension 0 "
"is outside valid range \\[0, 2\\)"));
}
TEST(IndexTransformBuilderTest, InvalidInputDimensionPositive) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1)
.output_single_input_dimension(0, 2)
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Input dimension 2 specified for output dimension 0 "
"is outside valid range \\[0, 2\\)"));
}
TEST(IndexTransformBuilderTest, InvalidIndexArrayRank) {
EXPECT_THAT(IndexTransformBuilder<>(2, 1)
.output_index_array(0, 0, 1, MakeArray<Index>({1}))
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Index array for output dimension 0 "
"has rank 1 but must have rank 2"));
}
TEST(IndexTransformBuilderTest, InvalidIndexArrayShape) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1)
.input_shape({2, 2})
.output_index_array(0, 0, 1,
MakeArray<Index>({{1, 2}, {3, 4}, {5, 6}}))
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Index array for output dimension 0 has shape \\{3, 2\\} "
"which does not match input_shape \\{2, 2\\}"));
}
TEST(IndexTransformBuilderTest, InvalidIndexArrayImplicitLowerBound) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1)
.input_shape({3, 2})
.implicit_lower_bounds({1, 0})
.output_index_array(0, 0, 1,
MakeArray<Index>({{1, 2}, {3, 4}, {5, 6}}))
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Index array for output dimension 0 "
"depends on input dimension 0 with implicit bounds"));
}
TEST(IndexTransformBuilderTest, InvalidIndexArrayImplicitUpperBound) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1)
.input_shape({3, 2})
.implicit_upper_bounds({1, 0})
.output_index_array(0, 0, 1,
MakeArray<Index>({{1, 2}, {3, 4}, {5, 6}}))
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Index array for output dimension 0 "
"depends on input dimension 0 with implicit bounds"));
}
TEST(IndexTransformBuilderTest, InvalidIndexArrayIndexRange) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1)
.input_shape({2, 2})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2}, {3, 4}}),
IndexInterval::Sized(3, -1))
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"\\(3, -1\\) do not specify a valid sized index interval"));
}
TEST(IndexTransformBuilderTest, InvalidIndexArrayWithUnboundedDomain) {
EXPECT_THAT(
IndexTransformBuilder(1, 1)
.input_origin({tensorstore::kMaxFiniteIndex})
.input_shape({2})
.output_index_array(0, 0, 1, MakeArray<Index>({1, 2}))
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Index array for output dimension 0 "
"depends on input dimension 0 with infinite bounds"));
}
TEST(IndexTransformBuilderDeathTest, InvalidArguments) {
EXPECT_DEATH((IndexTransformBuilder<>(2, 1).input_origin({1, 2, 3})),
"range size mismatch");
EXPECT_DEATH((IndexTransformBuilder<>(2, 1).input_shape({1, 2, 3})),
"range size mismatch");
EXPECT_DEATH((IndexTransformBuilder<>(2, 1).implicit_lower_bounds({1, 1, 0})),
"range size mismatch");
EXPECT_DEATH((IndexTransformBuilder<>(2, 1).implicit_upper_bounds({1, 1, 0})),
"range size mismatch");
EXPECT_DEATH((IndexTransformBuilder<>(2, 1).input_labels({"a"})),
"range size mismatch");
EXPECT_DEATH((IndexTransformBuilder<>(2, 1).output_constant(1, 0)),
"invalid output dimension");
}
TEST(IndexTransformBuilderTest, OutputStrideZero) {
auto t = IndexTransformBuilder<>(1, 1)
.output_single_input_dimension(0, 1, 0, 0)
.Finalize()
.value();
auto map = t.output_index_map(0);
EXPECT_EQ(1, map.offset());
EXPECT_EQ(0, map.stride());
EXPECT_EQ(OutputIndexMethod::constant, map.method());
}
TEST(IndexTransformBuilderTest, InclusiveMax) {
auto t = IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_inclusive_max({3, 5})
.Finalize()
.value();
EXPECT_THAT(t.input_origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(t.input_shape(), ::testing::ElementsAre(3, 4));
}
TEST(IndexTransformBuilderTest, InputShapeInfSize) {
auto t = IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_shape({3, kInfSize})
.Finalize()
.value();
EXPECT_THAT(t.input_origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(t.input_shape(), ::testing::ElementsAre(3, kInfIndex + 1 - 2));
}
TEST(IndexTransformBuilderTest, ExclusiveMax) {
auto t = IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_exclusive_max({3, 5})
.Finalize()
.value();
EXPECT_THAT(t.input_origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(t.input_shape(), ::testing::ElementsAre(2, 3));
}
TEST(IndexTransformBuilderTest, ExclusiveMaxAfterShape) {
auto t = IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_shape({15, 16})
.input_exclusive_max({3, 5})
.Finalize()
.value();
EXPECT_THAT(t.input_origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(t.input_shape(), ::testing::ElementsAre(2, 3));
}
TEST(IndexTransformBuilderTest, InputDomainBox) {
auto t = IndexTransformBuilder<>(2, 2)
.input_bounds(tensorstore::BoxView({1, 2}, {2, 3}))
.Finalize()
.value();
EXPECT_THAT(t.input_origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(t.input_shape(), ::testing::ElementsAre(2, 3));
}
TEST(IndexTransformBuilderTest, InputDomain) {
tensorstore::IndexDomain<2> domain(IndexTransformBuilder<2, 0>()
.input_origin({1, 2})
.input_shape({3, 4})
.implicit_lower_bounds({0, 1})
.implicit_upper_bounds({1, 0})
.input_labels({"x", "y"})
.Finalize()
.value()
.domain());
auto t =
IndexTransformBuilder<>(2, 2).input_domain(domain).Finalize().value();
EXPECT_EQ(domain, t.domain());
}
TEST(IndexTransformBuilderTest, OutputIdentityTransform) {
EXPECT_THAT(
IndexTransformBuilder(2, 2).output_identity_transform().Finalize(),
::testing::Optional(tensorstore::IdentityTransform(2)));
EXPECT_EQ(IndexTransformBuilder(3, 2)
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
IndexTransformBuilder(3, 2)
.output_identity_transform()
.Finalize()
.value());
EXPECT_EQ(IndexTransformBuilder(2, 3)
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_constant(2, 0)
.Finalize()
.value(),
IndexTransformBuilder(2, 3)
.output_identity_transform()
.Finalize()
.value());
}
TEST(IndexTransformBuilderTest, CopyOutputMap) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto t,
IndexTransformBuilder(3, 4)
.input_origin({1, 2, 3})
.input_shape({2, 2, 4})
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({1, 0, 0})
.input_labels({"x", "y", "z"})
.output_constant(0, 4)
.output_single_input_dimension(1, 5, 7, 2)
.output_constant(2, 6)
.output_index_array(3, 7, 9, MakeArray<Index>({{{1, 0, 2, 2}}}),
IndexInterval::Closed(0, 3))
.Finalize());
EXPECT_THAT(IndexTransformBuilder(3, 4)
.input_domain(t.domain())
.output_maps(t.output_index_maps())
.Finalize(),
::testing::Optional(t));
EXPECT_THAT(IndexTransformBuilder(3, 4)
.input_domain(t.domain())
.output_constant(0, 4)
.output_map(1, t.output_index_maps()[1])
.output_map(2, t.output_index_maps()[2])
.output_map(3, t.output_index_maps()[3])
.Finalize(),
::testing::Optional(t));
}
TEST(InitializeTransformRepForBuilder, Basic) {
auto source = tensorstore::internal_index_space::TransformRep::Allocate(1, 2);
source->output_rank = 2;
tensorstore::internal_index_space::InitializeTransformRepForBuilder(
source.get());
EXPECT_EQ(0, source->output_index_maps()[0].offset());
EXPECT_EQ(0, source->output_index_maps()[0].stride());
EXPECT_EQ(0, source->output_index_maps()[1].offset());
EXPECT_EQ(0, source->output_index_maps()[1].stride());
}
TEST(IndexTransformBuilder, NonUniqueLabels) {
EXPECT_THAT(
IndexTransformBuilder<>(3, 0).input_labels({"a", "", "a"}).Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Dimension label\\(s\\) \"a\" not unique"));
}
TEST(IndexTransformBuilderTest, IndexArrayWithEmptyExplicitDomain) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected,
IndexTransformBuilder(2, 2)
.input_shape({0, 2})
.output_constant(0, 0)
.output_constant(1, 1)
.Finalize());
EXPECT_THAT(IndexTransformBuilder(2, 2)
.input_shape({0, 2})
.output_index_array(0, 0, 1, MakeArray<Index>({{2, 3}}))
.output_constant(1, 1)
.Finalize(),
::testing::Optional(expected));
}
TEST(IndexDomainBuilderTest, Null) {
IndexDomainBuilder builder(nullptr);
EXPECT_FALSE(builder.valid());
}
TEST(IndexDomainBuilderTest, Basic) {
IndexDomainBuilder builder(3);
EXPECT_EQ(3, builder.rank());
builder.origin(span<const Index, 3>({1, 2, 3}));
EXPECT_THAT(builder.origin(), ::testing::ElementsAre(1, 2, 3));
builder.shape(span<const Index, 3>({4, 5, 6}));
EXPECT_THAT(builder.shape(), ::testing::ElementsAre(4, 5, 6));
builder.exclusive_max(span<const Index, 3>({4, 5, 6}));
EXPECT_THAT(builder.exclusive_max(), ::testing::ElementsAre(4, 5, 6));
builder.inclusive_max(span<const Index, 3>({4, 5, 6}));
EXPECT_THAT(builder.inclusive_max(), ::testing::ElementsAre(4, 5, 6));
builder.implicit_lower_bounds({0, 1, 1});
builder.implicit_upper_bounds({1, 0, 1});
EXPECT_THAT(builder.implicit_lower_bounds(),
DimensionSet::FromBools({0, 1, 1}));
EXPECT_THAT(builder.implicit_upper_bounds(),
DimensionSet::FromBools({1, 0, 1}));
builder.labels(std::vector<std::string>{"x", "y", "z"});
EXPECT_THAT(builder.labels(), ::testing::ElementsAre("x", "y", "z"));
}
TEST(IndexDomainBuilderTest, Labels) {
auto d = IndexDomainBuilder(2).labels({"x", "y"}).Finalize().value();
EXPECT_THAT(d.labels(), ::testing::ElementsAre("x", "y"));
}
TEST(IndexDomainBuilderTest, InclusiveMax) {
auto d = IndexDomainBuilder(2)
.origin({1, 2})
.inclusive_max({3, 5})
.Finalize()
.value();
EXPECT_THAT(d.origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(d.shape(), ::testing::ElementsAre(3, 4));
}
TEST(IndexDomainBuilderTest, Shape) {
auto d =
IndexDomainBuilder(2).origin({1, 2}).shape({3, 5}).Finalize().value();
EXPECT_THAT(d.origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(d.shape(), ::testing::ElementsAre(3, 5));
}
TEST(IndexDomainBuilderTest, ExclusiveMax) {
auto d = IndexDomainBuilder(2)
.origin({1, 2})
.exclusive_max({3, 5})
.Finalize()
.value();
EXPECT_THAT(d.origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(d.shape(), ::testing::ElementsAre(2, 3));
}
TEST(IndexDomainBuilderTest, InputDomainBox) {
auto d = IndexDomainBuilder(2)
.bounds(tensorstore::BoxView({1, 2}, {2, 3}))
.Finalize()
.value();
EXPECT_THAT(d.origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(d.shape(), ::testing::ElementsAre(2, 3));
}
TEST(IndexDomainBuilderTest, InputDomain) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(tensorstore::IndexDomain<2> domain,
IndexDomainBuilder<2>()
.origin({1, 2})
.shape({3, 4})
.implicit_lower_bounds({0, 1})
.implicit_upper_bounds({1, 0})
.labels({"x", "y"})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto d, IndexDomainBuilder<>(2).domain(domain).Finalize());
EXPECT_EQ(domain, d);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/index_transform_builder.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/index_transform_builder_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
5c8c878a-fc1d-4763-a675-7908c10fe949 | cpp | tensorflow/tensorflow | unique | tensorflow/lite/kernels/unique.cc | tensorflow/lite/kernels/unique_test.cc | #include <stddef.h>
#include <stdint.h>
#include <map>
#include <memory>
#include <vector>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace unique {
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
return nullptr;
}
void Free(TfLiteContext* context, void* buffer) {}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
static const int kOutputUniqueTensor = 0;
static const int kOutputIndexTensor = 1;
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output_unique_tensor;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputUniqueTensor,
&output_unique_tensor));
TfLiteTensor* output_index_tensor;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputIndexTensor,
&output_index_tensor));
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1);
TfLiteIntArray* output_index_shape = TfLiteIntArrayCopy(input->dims);
SetTensorToDynamic(output_unique_tensor);
return context->ResizeTensor(context, output_index_tensor,
output_index_shape);
}
namespace {
template <typename T, typename I>
TfLiteStatus EvalImpl(TfLiteContext* context, const TfLiteTensor* input,
TfLiteNode* node) {
std::map<T, int> unique_values;
TfLiteTensor* output_indexes;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 1, &output_indexes));
std::vector<T> output_values;
I* indexes = GetTensorData<I>(output_indexes);
const T* data = GetTensorData<T>(input);
const int num_elements = NumElements(input);
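  // Single pass over the input: assign each newly seen value the next unique
  // index and record, for every element, the index of its first occurrence.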
for (int i = 0; i < num_elements; ++i) {
const auto element_it = unique_values.find(data[i]);
if (element_it != unique_values.end()) {
indexes[i] = element_it->second;
} else {
const int unique_index = unique_values.size();
unique_values[data[i]] = unique_index;
indexes[i] = unique_index;
output_values.push_back(data[i]);
}
}
TfLiteTensor* unique_output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &unique_output));
std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape(
TfLiteIntArrayCreate(NumDimensions(input)), TfLiteIntArrayFree);
shape->data[0] = unique_values.size();
TF_LITE_ENSURE_STATUS(
context->ResizeTensor(context, unique_output, shape.release()));
T* output_unique_values = GetTensorData<T>(unique_output);
for (int i = 0; i < output_values.size(); ++i) {
output_unique_values[i] = output_values[i];
}
return kTfLiteOk;
}
template <typename T>
TfLiteStatus EvalImpl(TfLiteContext* context, const TfLiteTensor* input,
TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteUniqueParams*>(node->builtin_data);
if (params == nullptr) {
TF_LITE_KERNEL_LOG(context, "Null params passed");
return kTfLiteError;
}
switch (params->index_out_type) {
case kTfLiteInt32:
return EvalImpl<T, int32_t>(context, input, node);
case kTfLiteInt64:
return EvalImpl<T, int64_t>(context, input, node);
default:
TF_LITE_KERNEL_LOG(
context,
"Unique index output array can only be Int32 or In64, requested: %s",
TfLiteTypeGetName(params->index_out_type));
}
return kTfLiteError;
}
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output_index_tensor;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, 1, &output_index_tensor));
TF_LITE_ENSURE_EQ(context, NumElements(output_index_tensor),
NumElements(input));
switch (input->type) {
case kTfLiteInt8:
TF_LITE_ENSURE_STATUS(EvalImpl<int8_t>(context, input, node));
break;
case kTfLiteInt16:
TF_LITE_ENSURE_STATUS(EvalImpl<int16_t>(context, input, node));
break;
case kTfLiteInt32:
TF_LITE_ENSURE_STATUS(EvalImpl<int32_t>(context, input, node));
break;
case kTfLiteInt64:
TF_LITE_ENSURE_STATUS(EvalImpl<int64_t>(context, input, node));
break;
case kTfLiteFloat32:
TF_LITE_ENSURE_STATUS(EvalImpl<float>(context, input, node));
break;
case kTfLiteUInt8:
TF_LITE_ENSURE_STATUS(EvalImpl<uint8_t>(context, input, node));
break;
default:
TF_LITE_KERNEL_LOG(context, "Currently Unique doesn't support type: %s",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_UNIQUE() {
static TfLiteRegistration r = {unique::Init, unique::Free, unique::Prepare,
unique::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
template <typename T, typename I>
class UniqueOpModel : public SingleOpModel {
public:
UniqueOpModel(const TensorData& input, TensorType input_type,
TensorType index_out_type) {
input_id_ = AddInput(input);
output_id_ = AddOutput(input_type);
output_index_id_ = AddOutput(index_out_type);
SetBuiltinOp(BuiltinOperator_UNIQUE, BuiltinOptions_UniqueOptions,
CreateUniqueOptions(builder_, index_out_type).Union());
BuildInterpreter({GetShape(input_id_)});
}
int input_tensor_id() { return input_id_; }
std::vector<T> GetOutput() { return ExtractVector<T>(output_id_); }
std::vector<I> GetIndexesOutput() {
return ExtractVector<I>(output_index_id_);
}
protected:
int input_id_;
int output_id_;
int output_index_id_;
};
TEST(UniqueOpModelTest, OneElement) {
UniqueOpModel<float, int32_t> model({TensorType_FLOAT32, {1}},
TensorType_FLOAT32, TensorType_INT32);
model.PopulateTensor<float>(model.input_tensor_id(), {5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({5}));
EXPECT_THAT(model.GetIndexesOutput(), ElementsAreArray({0}));
}
TEST(UniqueOpModelTest, MultipleElements_AllUnique) {
UniqueOpModel<float, int32_t> model({TensorType_FLOAT32, {8}},
TensorType_FLOAT32, TensorType_INT32);
model.PopulateTensor<float>(model.input_tensor_id(),
{5, 2, 3, 51, 6, 72, 7, 8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({5, 2, 3, 51, 6, 72, 7, 8}));
EXPECT_THAT(model.GetIndexesOutput(),
ElementsAreArray({0, 1, 2, 3, 4, 5, 6, 7}));
}
TEST(UniqueOpModelTest, MultipleElements_AllDuplicates) {
UniqueOpModel<float, int32_t> model({TensorType_FLOAT32, {7}},
TensorType_FLOAT32, TensorType_INT32);
model.PopulateTensor<float>(model.input_tensor_id(), {5, 5, 5, 5, 5, 5, 5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({5}));
EXPECT_THAT(model.GetIndexesOutput(),
ElementsAreArray({0, 0, 0, 0, 0, 0, 0}));
}
TEST(UniqueOpModelTest, MultipleElements_SomeDuplicates) {
UniqueOpModel<float, int32_t> model({TensorType_FLOAT32, {7}},
TensorType_FLOAT32, TensorType_INT32);
model.PopulateTensor<float>(model.input_tensor_id(), {2, 3, 5, 7, 2, 7, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({2, 3, 5, 7}));
EXPECT_THAT(model.GetIndexesOutput(),
ElementsAreArray({0, 1, 2, 3, 0, 3, 1}));
}
TEST(UniqueOpModelTest, MultipleElements_RepeatedDuplicates) {
UniqueOpModel<float, int32_t> model({TensorType_FLOAT32, {6}},
TensorType_FLOAT32, TensorType_INT32);
model.PopulateTensor<float>(model.input_tensor_id(),
{-1, -1, -2, -2, -3, -3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({-1, -2, -3}));
EXPECT_THAT(model.GetIndexesOutput(), ElementsAreArray({0, 0, 1, 1, 2, 2}));
}
TEST(UniqueOpModelTest, MultipleElements_SomeDuplicates_IndexInt64) {
UniqueOpModel<float, int64_t> model({TensorType_FLOAT32, {7}},
TensorType_FLOAT32, TensorType_INT64);
model.PopulateTensor<float>(model.input_tensor_id(), {2, 3, 5, 7, 2, 7, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({2, 3, 5, 7}));
EXPECT_THAT(model.GetIndexesOutput(),
ElementsAreArray({0, 1, 2, 3, 0, 3, 1}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unique.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unique_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0dda2eaf-2fed-45a0-a247-17e9f1f0761c | cpp | tensorflow/tensorflow | filter_parallelization | tensorflow/core/grappler/optimizers/data/filter_parallelization.cc | tensorflow/core/grappler/optimizers/data/filter_parallelization_test.cc | #include "tensorflow/core/grappler/optimizers/data/filter_parallelization.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kFilterDataset[] = "FilterDataset";
constexpr char kParallelFilterDataset[] = "ParallelFilterDataset";
NodeDef MakeParallelFilter(const string& name, MutableGraphView* graph) {
int index = graph_utils::FindGraphNodeWithName(name, *graph->graph());
DCHECK_NE(index, -1) << "Failed to find node " << name
<< " in the optimized graph.";
NodeDef parallel_filter = graph->graph()->node(index);
graph_utils::SetUniqueGraphNodeName(kParallelFilterDataset, graph->graph(),
¶llel_filter);
parallel_filter.set_op(kParallelFilterDataset);
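  // Append a scalar `num_parallel_calls` input set to AUTOTUNE and mark the op
  // as deterministic, matching ParallelFilterDataset's signature.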
auto* num_parallel_calls = graph_utils::AddScalarConstNode(
static_cast<int64_t>(data::model::kAutotune), graph);
parallel_filter.add_input(num_parallel_calls->name());
AddNodeAttr("deterministic", "true", ¶llel_filter);
return parallel_filter;
}
}
Status FilterParallelization::OptimizeAndCollectStats(
Cluster* cluster, const GrapplerItem& item, GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
VLOG(1) << "The optimization filter_parallelization is not applied if "
"autotune is off.";
return absl::OkStatus();
}
MutableGraphView graph(output);
if (graph_utils::IsItemDerivedFromFunctionDef(item, graph))
return absl::OkStatus();
absl::flat_hash_set<string> nodes_to_delete;
FunctionLibraryDefinition function_library(OpRegistry::Global(),
item.graph.library());
auto get_filter_node = [](const NodeDef& node) -> const NodeDef* {
if (node.op() == kFilterDataset) return &node;
return nullptr;
};
for (const NodeDef& node : item.graph.node()) {
const NodeDef* filter_node = get_filter_node(node);
if (!filter_node) continue;
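    // Only filters with a stateless predicate function are parallelized.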
auto* function = function_library.Find(
filter_node->attr().at("predicate").func().name());
if (function_utils::IsFunctionStateful(function_library, *function, true)) {
continue;
}
auto* parallel_filter =
graph.AddNode(MakeParallelFilter(filter_node->name(), &graph));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(filter_node->name(), parallel_filter->name()));
nodes_to_delete.insert(filter_node->name());
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(FilterParallelization, "filter_parallelization");
}
} | #include "tensorflow/core/grappler/optimizers/data/filter_parallelization.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
Status OptimizeWithFilterParallelization(const GrapplerItem& item,
GraphDef* output, bool autotune) {
FilterParallelization optimizer;
RewriterConfig_CustomGraphOptimizer config;
if (autotune) {
(*config.mutable_parameter_map())["autotune"].set_s("true");
} else {
(*config.mutable_parameter_map())["autotune"].set_s("false");
}
TF_RETURN_IF_ERROR(optimizer.Init(&config));
return optimizer.Optimize(nullptr, item, output);
}
using graph_tests_utils::MakeFilterNode;
const char stateless_fun_name[] = "NonZero";
const char stateful_fun_name[] = "RandomUniformLess";
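// The rewrite should only be applied when autotune is enabled.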
class AutotuneSetting : public ::testing::TestWithParam<bool> {};
TEST_P(AutotuneSetting, FilterParallelizationTest) {
const bool autotune = GetParam();
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeFilterNode("filter", "range", stateless_fun_name),
NDef("Sink", "Identity", {"filter"}, {})},
{
test::function::NonZero(),
});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithFilterParallelization(item, &output, autotune));
EXPECT_EQ(graph_utils::ContainsNodeWithOp("ParallelFilterDataset", output),
autotune);
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName("filter", output),
!autotune);
}
INSTANTIATE_TEST_SUITE_P(Test, AutotuneSetting, ::testing::Values(false, true));
class FromFunctionDef : public ::testing::TestWithParam<string> {};
TEST_P(FromFunctionDef, FilterParallelizationTest) {
const string op = GetParam();
bool from_function_def = (op == "_Retval");
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeFilterNode("filter", "range", stateless_fun_name),
NDef("Sink", op, {"filter"}, {})},
{
test::function::NonZero(),
});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithFilterParallelization(item, &output, true));
EXPECT_EQ(graph_utils::ContainsNodeWithOp("ParallelFilterDataset", output),
!from_function_def);
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName("filter", output),
from_function_def);
}
INSTANTIATE_TEST_SUITE_P(Test, FromFunctionDef,
::testing::Values("Identity", "_Retval"));
TEST(ParallelizeAssert, FilterParallelizationTest) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("filename", "Const", {}, {{"value", ""}, {"dtype", DT_STRING}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeFilterNode("filter1", "range", stateful_fun_name),
MakeFilterNode("filter2", "filter1", stateless_fun_name),
NDef("cache", "CacheDataset", {"filter2", "filename"}, {}),
NDef("Sink", "Identity", {"cache"}, {})},
{
test::function::NonZero(),
test::function::RandomUniformLess(),
});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithFilterParallelization(item, &output, true));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("ParallelFilterDataset", output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("FilterDataset", output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("filter1", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter2", output));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/filter_parallelization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/filter_parallelization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c6ee76f1-6ebb-49a4-b2a3-6cc5517b6271 | cpp | tensorflow/tensorflow | gpu_hlo_schedule | third_party/xla/xla/service/gpu/gpu_hlo_schedule.cc | third_party/xla/xla/service/gpu/gpu_hlo_schedule_test.cc | #include "xla/service/gpu/gpu_hlo_schedule.h"
#include <cstddef>
#include <cstdint>
#include <deque>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/buffer_value.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_latency_hiding_scheduler.h"
#include "xla/service/gpu/model/analytical_latency_estimator.h"
#include "xla/service/gpu/transforms/pgle_accuracy_checker.h"
#include "xla/service/gpu/transforms/schedule_postprocessing.h"
#include "xla/service/gpu/transforms/scheduling_instruction_annotator.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/latency_hiding_scheduler.h"
#include "xla/service/p2p_schedule_preparation.h"
#include "xla/service/profile_guided_latency_estimator.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla {
namespace gpu {
namespace {
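// Returns true for ops that should be issued as early as possible:
// asynchronous collective starts (all-reduce / collective-permute starts that
// are not marked synchronous), async-starts, and custom calls annotated with
// SCHEDULE_EARLIEST.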
bool ShouldScheduleAsEarlyAsPossible(const HloInstruction& instr) {
switch (instr.opcode()) {
case HloOpcode::kAllReduceStart:
case HloOpcode::kCollectivePermuteStart:
return !IsSyncCollective(&instr);
case HloOpcode::kAsyncStart:
return true;
case HloOpcode::kCustomCall:
return static_cast<const HloCustomCallInstruction&>(instr)
.custom_call_schedule() ==
CustomCallSchedule::SCHEDULE_EARLIEST;
default:
return false;
}
}
bool ShouldScheduleSuccessor(const HloInstruction& successor,
                             const HloPredicate& is_scheduled) {
  return ShouldScheduleAsEarlyAsPossible(successor) &&
         absl::c_all_of(successor.operands(), is_scheduled) &&
         absl::c_all_of(successor.control_predecessors(), is_scheduled);
}
bool ShouldScheduleAsLateAsPossible(const HloInstruction& instr) {
switch (instr.opcode()) {
case HloOpcode::kAllReduceDone:
case HloOpcode::kCollectivePermuteDone:
return ShouldScheduleAsEarlyAsPossible(*instr.operand(0));
case HloOpcode::kAsyncDone:
return true;
case HloOpcode::kCustomCall:
return static_cast<const HloCustomCallInstruction&>(instr)
.custom_call_schedule() == CustomCallSchedule::SCHEDULE_LATEST;
default:
return false;
}
}
bool ShouldSchedulePredecessor(const HloInstruction& predecessor,
const HloPredicate& is_scheduled) {
return ShouldScheduleAsLateAsPossible(predecessor) &&
absl::c_all_of(predecessor.users(), is_scheduled) &&
absl::c_all_of(predecessor.control_successors(), is_scheduled);
}
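// Schedule postprocessor: a forward pass pulls "earliest" ops (see above)
// right behind the point where their operands become ready, then a backward
// pass pushes "latest" ops (async dones, SCHEDULE_LATEST custom calls) just
// before their first user. The set of instructions is left unchanged.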
HloInstructionSequence PostprocessorToScheduleAsEarlyOrLateAsPossible(
const HloInstructionSequence& input) {
std::vector<HloInstruction*> earliest_scheduled;
{
absl::flat_hash_set<HloInstruction*> scheduled;
auto is_scheduled = [&](const HloInstruction* instr) -> bool {
return scheduled.contains(instr);
};
auto add_to_schedule = [&](HloInstruction* instr) {
earliest_scheduled.push_back(instr);
scheduled.insert(instr);
};
for (HloInstruction* instr : input.instructions()) {
if (is_scheduled(instr)) continue;
add_to_schedule(instr);
for (HloInstruction* user : instr->users()) {
if (is_scheduled(user)) continue;
if (ShouldScheduleSuccessor(*user, is_scheduled)) {
add_to_schedule(user);
}
}
for (HloInstruction* successor : instr->control_successors()) {
if (is_scheduled(successor)) continue;
if (ShouldScheduleSuccessor(*successor, is_scheduled)) {
add_to_schedule(successor);
}
}
}
}
std::deque<HloInstruction*> latest_scheduled;
{
absl::flat_hash_set<HloInstruction*> scheduled;
auto is_scheduled = [&](const HloInstruction* instr) -> bool {
return scheduled.contains(instr);
};
auto add_to_schedule = [&](HloInstruction* instr) {
latest_scheduled.push_front(instr);
scheduled.insert(instr);
};
for (auto it = earliest_scheduled.rbegin(); it != earliest_scheduled.rend();
it++) {
if (is_scheduled(*it)) continue;
add_to_schedule(*it);
for (HloInstruction* operand : (*it)->operands()) {
if (is_scheduled(operand)) continue;
if (ShouldSchedulePredecessor(*operand, is_scheduled)) {
add_to_schedule(operand);
}
}
for (HloInstruction* predecessor : (*it)->control_predecessors()) {
if (is_scheduled(predecessor)) continue;
if (ShouldSchedulePredecessor(*predecessor, is_scheduled)) {
add_to_schedule(predecessor);
}
}
}
}
HloInstructionSequence result;
absl::c_for_each(latest_scheduled,
[&](HloInstruction* i) { result.push_back(i); });
CHECK(input.instructions().size() == result.size())
<< "schedule as early or late post-processing changed schedule size from "
<< input.instructions().size() << " to " << result.size();
return result;
}
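// Schedule postprocessor that moves each synchronous collective start
// immediately before its matching done op, so the pair executes back to back.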
HloInstructionSequence PostprocessorToScheduleSyncCollectives(
const HloInstructionSequence& input) {
HloInstructionSequence result;
auto is_sync_start = [](const HloInstruction* instr) {
return hlo_query::IsAsyncCollectiveStartOp(instr,
true) &&
IsSyncCollective(instr);
};
for (HloInstruction* instr : input.instructions()) {
if (is_sync_start(instr)) continue;
if (hlo_query::IsAsyncCollectiveDoneOp(instr, true)) {
HloInstruction* start = instr->mutable_operand(0);
if (is_sync_start(start)) result.push_back(start);
}
result.push_back(instr);
}
CHECK(input.instructions().size() == result.size())
<< "sync collectives post-processing changed schedule size from "
<< input.instructions().size() << " to " << result.size();
return result;
}
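// Latency-hiding scheduler configuration: at most one collective of each kind
// in flight, aggressive scheduling policies, and the computed memory limit.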
SchedulerConfig GetSchedulerConfig(int64_t memory_limit) {
SchedulerConfig config;
config.all_reduce_overlap_limit = 1;
config.collective_broadcast_overlap_limit = 1;
config.collective_permute_overlap_limit = 1;
config.use_real_cost_model = false;
config.aggressive_scheduling_policies = true;
config.schedule_send_recvs = true;
config.memory_limit = memory_limit;
return config;
}
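// Restricts a PGLE profile to entries matching the given module fingerprint
// (cost names of the form "<fingerprint>::<instruction>") and, if any cost
// names carry a ".remat<N>" suffix, merges rematerialization clones by
// averaging their costs.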
tensorflow::profiler::ProfiledInstructionsProto GetProfileForFingerprint(
tensorflow::profiler::ProfiledInstructionsProto& profile,
const std::string& fingerprint) {
tensorflow::profiler::ProfiledInstructionsProto result;
bool merge_remat_clones = false;
for (const auto& cost : profile.costs()) {
absl::string_view cost_name = cost.name();
std::string new_cost_name = cost.name();
absl::string_view cost_sep = "::";
if (absl::StrContains(cost_name, cost_sep)) {
std::vector<std::string> split_names =
absl::StrSplit(cost_name, cost_sep);
if (split_names.size() != 2 || split_names[0] != fingerprint) {
continue;
}
new_cost_name = split_names[1];
}
merge_remat_clones |= absl::StrContains(new_cost_name, ".remat");
auto* new_cost = result.add_costs();
new_cost->set_cost_us(cost.cost_us());
new_cost->set_name(new_cost_name);
}
if (!merge_remat_clones) {
return result;
}
auto strip_remat_suffix = [](absl::string_view name) -> absl::string_view {
absl::string_view suffix = ".remat";
size_t index = name.rfind(suffix);
if (index == std::string::npos) {
return name;
}
auto after_suffix = name.substr(index + suffix.size());
int64_t numeric_suffix;
if (after_suffix.empty() ||
absl::SimpleAtoi(after_suffix, &numeric_suffix)) {
return name.substr(0, index);
}
return name;
};
absl::flat_hash_map<absl::string_view, std::pair<double, int64_t>> costs;
for (const auto& cost : result.costs()) {
std::pair<double, int64_t>& data = costs[strip_remat_suffix(cost.name())];
data.first += cost.cost_us();
data.second++;
}
tensorflow::profiler::ProfiledInstructionsProto merged_result;
for (const auto& cost : costs) {
auto* new_cost = merged_result.add_costs();
double average = cost.second.first / cost.second.second;
new_cost->set_cost_us(average);
new_cost->set_name(std::string(cost.first));
}
return merged_result;
}
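// Loads the profile used for profile-guided latency estimation. Tries the
// module's embedded fdo_profile first (binary, then text proto); otherwise
// reads from xla_gpu_pgle_profile_file_or_directory_path, looking up
// "<fingerprint>.pbtxt" / "<fingerprint>.pb" when the path is a directory.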
std::optional<tensorflow::profiler::ProfiledInstructionsProto> ReadPGLEProfile(
const HloModule* module, const std::string& fingerprint) {
tensorflow::profiler::ProfiledInstructionsProto profile;
absl::string_view fdo_profile = module->config().fdo_profile();
if (!fdo_profile.empty()) {
if (tsl::ParseProtoUnlimited(&profile, fdo_profile.data(),
fdo_profile.size())) {
LOG(INFO) << "Using PGLE profile for module from fdo_profile (binary)";
return GetProfileForFingerprint(profile, fingerprint);
}
profile.Clear();
if (tsl::protobuf::TextFormat::ParseFromString(std::string(fdo_profile),
&profile)) {
LOG(INFO) << "Using PGLE profile for module from fdo_profile (text)";
return GetProfileForFingerprint(profile, fingerprint);
}
LOG(ERROR) << "Unable to prase FDO profile: not a valid text or binary "
"ProfiledInstructionsProto";
}
const std::string& pgle_profile_file_or_dir_path =
module->config()
.debug_options()
.xla_gpu_pgle_profile_file_or_directory_path();
if (pgle_profile_file_or_dir_path.empty()) {
return std::nullopt;
}
tsl::Env* env = tsl::Env::Default();
auto read_text_or_binary_profile = [&profile, env, &fingerprint](
const std::string& text_path,
const std::string& binary_path)
-> std::optional<tensorflow::profiler::ProfiledInstructionsProto> {
if (env->FileExists(text_path).ok()) {
absl::Status s = tsl::ReadTextProto(env, text_path, &profile);
if (s.ok()) {
LOG(INFO) << "Using PGLE profile from " << text_path;
return GetProfileForFingerprint(profile, fingerprint);
} else {
LOG(ERROR) << "Unable to read PGLE text proto from " << text_path
<< ": " << s.message();
}
profile.Clear();
}
if (env->FileExists(binary_path).ok()) {
absl::Status s = tsl::ReadBinaryProto(env, binary_path, &profile);
if (s.ok()) {
LOG(INFO) << "Using PGLE profile from " << binary_path;
return GetProfileForFingerprint(profile, fingerprint);
} else {
LOG(ERROR) << "Unable to read PGLE binary proto from " << binary_path
<< ": " << s.message();
}
profile.Clear();
}
return std::nullopt;
};
if (env->IsDirectory(pgle_profile_file_or_dir_path).ok()) {
std::string pgle_profile_path_prefix =
pgle_profile_file_or_dir_path + "/" + fingerprint;
return read_text_or_binary_profile(pgle_profile_path_prefix + ".pbtxt",
pgle_profile_path_prefix + ".pb");
}
auto extension = tsl::io::Extension(pgle_profile_file_or_dir_path);
if (extension == "pbtxt") {
return read_text_or_binary_profile(pgle_profile_file_or_dir_path, "");
} else if (extension == "pb") {
return read_text_or_binary_profile("", pgle_profile_file_or_dir_path);
} else {
return read_text_or_binary_profile(pgle_profile_file_or_dir_path,
pgle_profile_file_or_dir_path);
}
}
}
static int64_t GetSchedulerMemoryLimit(
const HloModule* module, const se::DeviceDescription& gpu_device_info,
int pointer_size);
absl::StatusOr<ScheduleMetadata> ScheduleGpuModule(
HloModule* module, int64_t pointer_size,
const se::DeviceDescription& gpu_device_info) {
tsl::profiler::TraceMe traceme("GpuCompiler::CompileToBackendResult");
int64_t memory_limit =
GetSchedulerMemoryLimit(module, gpu_device_info, pointer_size);
if (module->has_schedule()) {
return ScheduleMetadata{memory_limit};
}
HloPassPipeline prepare_pipeline("p2p-schedule-preparation");
prepare_pipeline.AddPass<P2PSchedulePreparation>();
TF_RETURN_IF_ERROR(prepare_pipeline.Run(module).status());
TF_ASSIGN_OR_RETURN(
HloSchedule schedule,
ScheduleGpuModuleWithMemoryScheduler(module, pointer_size));
TF_RETURN_IF_ERROR(module->set_schedule(std::move(schedule)));
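  // Fingerprint the module before latency-hiding scheduling; PGLE profiles
  // are keyed on this fingerprint.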
std::string fingerprint = module->GetFingerprint128(
HloPrintOptions::Canonical().set_print_backend_config(true));
FrontendAttributes attributes;
(*attributes.mutable_map())[std::string(kFingerprintBeforeLHS)] = fingerprint;
module->add_frontend_attributes(attributes);
VLOG(1) << "Fingerprint before LHS for module " << module->name() << "("
<< module->unique_id() << ") = " << fingerprint;
const bool enable_latency_hiding_scheduler =
module->config()
.debug_options()
.xla_gpu_enable_latency_hiding_scheduler();
if (!enable_latency_hiding_scheduler) {
return ScheduleMetadata{memory_limit};
}
SchedulerConfig config = GetSchedulerConfig(memory_limit);
auto gpu_latency_estimator =
std::make_unique<GpuLatencyEstimator>(pointer_size);
std::unique_ptr<LatencyEstimator> latency_estimator;
std::optional<tensorflow::profiler::ProfiledInstructionsProto> profile =
ReadPGLEProfile(module, fingerprint);
const bool enable_analytical_latency_estimator =
module->config()
.debug_options()
.xla_gpu_enable_analytical_latency_estimator();
HloPassPipeline pipeline("latency-hiding-scheduler");
if (profile.has_value()) {
auto aggregator = std::make_unique<GPUProfileStatisticsAggregator>();
auto pg_latency_estimator = std::make_unique<ProfileGuidedLatencyEstimator>(
config, std::move(gpu_latency_estimator), profile.value(),
std::move(aggregator));
LOG(INFO) << "Found profile, using profile guided latency estimator";
VLOG(1) << "Profile:\n" << profile->DebugString();
if (module->config()
.debug_options()
.xla_gpu_enable_pgle_accuracy_checker()) {
pipeline.AddPass<PGLEAccuracyChecker>(*pg_latency_estimator);
}
latency_estimator = std::move(pg_latency_estimator);
} else if (enable_analytical_latency_estimator) {
latency_estimator = std::make_unique<AnalyticalLatencyEstimator>(
config, std::move(gpu_latency_estimator), gpu_device_info,
[input_pointer_size = pointer_size](const Shape& shape) {
return GetSizeOfShape(shape, input_pointer_size);
},
module->entry_computation());
LOG(INFO) << "Using analytical latency estimator";
} else {
latency_estimator = std::move(gpu_latency_estimator);
}
auto async_tracker = [&]() -> std::unique_ptr<AsyncTracker> {
return module->config()
.debug_options()
.xla_gpu_lhs_enable_gpu_async_tracker()
? std::make_unique<GpuAsyncTracker>(config)
: std::make_unique<GpuAsyncTrackerBase>(config);
}();
auto shape_size_in_bytes = [pointer_size](const Shape& shape) {
return GetSizeOfShape(shape, pointer_size);
};
auto scheduler_core = std::make_unique<DefaultSchedulerCore>(
shape_size_in_bytes, async_tracker.get(), latency_estimator.get(),
config);
pipeline.AddPass<SchedulingInstructionAnnotator>();
pipeline.AddPass<LatencyHidingScheduler>(
std::move(latency_estimator), std::move(async_tracker),
std::move(scheduler_core), shape_size_in_bytes);
TF_RETURN_IF_ERROR(pipeline.Run(module).status());
HloPassPipeline postprocessing_pipeline("schedule-postprocessing");
postprocessing_pipeline.AddPass<SchedulePostprocessing>();
TF_RETURN_IF_ERROR(postprocessing_pipeline.Run(module).status());
return ScheduleMetadata{memory_limit};
}
absl::StatusOr<HloSchedule> ScheduleGpuModuleWithMemoryScheduler(
const HloModule* module, int64_t pointer_size, int64_t* peak_memory_bytes) {
return ScheduleModule(
module,
[pointer_size](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), pointer_size);
},
ComputationSchedulerToModuleScheduler(DefaultMemoryScheduler,
PostProcessSchedule),
{}, peak_memory_bytes);
}
HloInstructionSequence PostProcessSchedule(
const HloInstructionSequence& input) {
HloInstructionSequence result = PostprocessorToScheduleSyncCollectives(input);
return PostprocessorToScheduleAsEarlyOrLateAsPossible(result);
}
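// Estimates the memory budget available to the scheduler: the configured
// device memory size (or 80% of physical device memory), minus the size of
// entry parameters and results (aliased outputs counted once), scaled by
// xla_gpu_memory_limit_slop_factor.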
static int64_t GetSchedulerMemoryLimit(
const HloModule* module, const se::DeviceDescription& gpu_device_info,
int pointer_size) {
const int64_t base_limit =
module->config().device_memory_size() != 0
? module->config().device_memory_size()
: gpu_device_info.device_memory_size() * 80 / 100;
int64_t total_io_size = 0;
for (HloInstruction* param :
module->entry_computation()->parameter_instructions()) {
ShapeUtil::ForEachSubshape(
param->shape(),
[&](const Shape& subshape, const ShapeIndex& ) {
total_io_size += GetSizeOfShape(subshape, pointer_size);
});
}
ShapeUtil::ForEachSubshape(
module->result_shape(),
[&](const Shape& subshape, const ShapeIndex& ) {
total_io_size += GetSizeOfShape(subshape, pointer_size);
});
module->input_output_alias_config().ForEachAlias(
[&](const ShapeIndex& output_index,
const HloInputOutputAliasConfig::Alias&) {
const Shape& subshape =
ShapeUtil::GetSubshape(module->result_shape(), output_index);
total_io_size -= GetSizeOfShape(subshape, pointer_size);
});
int64_t limit =
(base_limit - total_io_size) *
module->config().debug_options().xla_gpu_memory_limit_slop_factor() / 100;
return limit;
}
}
} | #include "xla/service/gpu/gpu_hlo_schedule.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/backend.h"
#include "xla/service/gpu/gpu_compiler.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_ordering.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/protobuf/profiled_instructions.pb.h"
namespace xla {
namespace gpu {
using ::testing::ElementsAre;
using ::tsl::testing::StatusIs;
class GpuHloScheduleTest : public HloTestBase {
protected:
using HloVec = std::vector<HloInstruction*>;
Shape f32_2x2_ = ShapeUtil::MakeShape(F32, {2, 2});
SequentialHloOrdering BuildHloOrdering(HloModule* module) {
Backend& test_backend = backend();
const se::DeviceDescription& gpu_device_info =
test_backend.default_stream_executor()->GetDeviceDescription();
TF_CHECK_OK(ScheduleGpuModule(module, 8, gpu_device_info)
.status());
return SequentialHloOrdering{module->schedule()};
}
HloModuleConfig GetModuleConfig(bool enable_latency_hiding_scheduler,
bool enable_gpu_async_tracker = false,
absl::string_view fdo_profile = "") {
HloModuleConfig config;
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_latency_hiding_scheduler(
enable_latency_hiding_scheduler);
debug_options.set_xla_gpu_lhs_enable_gpu_async_tracker(
enable_gpu_async_tracker);
config.set_debug_options(debug_options);
*config.mutable_fdo_profile() = fdo_profile;
return config;
}
std::unique_ptr<HloModule> CreateNewVerifiedModule(
bool enable_latency_hiding_scheduler = false) {
return std::make_unique<HloModule>(
"test_module", GetModuleConfig(enable_latency_hiding_scheduler));
}
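  // The pre-LHS fingerprint is a 128-bit hash, i.e. 32 hex characters.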
static bool HasValidFingerprint(HloModule* module) {
const FrontendAttributes& attrs = module->frontend_attributes();
auto it = attrs.map().find(kFingerprintBeforeLHS);
return it != attrs.map().end() && it->second.size() == 128 / 4;
}
};
TEST_F(GpuHloScheduleTest, SequentialMatMul) {
HloComputation::Builder builder("entry_computation");
HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
0, f32_2x2_, "x"));
HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
1, f32_2x2_, "y"));
HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter(
2, f32_2x2_, "z"));
HloInstruction* dot1 =
builder.AddInstruction(CreateCanonicalDot(f32_2x2_, x, y));
HloInstruction* dot2 =
builder.AddInstruction(CreateCanonicalDot(f32_2x2_, dot1, z));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build(dot2));
SequentialHloOrdering order = BuildHloOrdering(module.get());
EXPECT_TRUE(order.ExecutesBefore(y, x));
EXPECT_TRUE(order.ExecutesBefore(y, dot1));
EXPECT_TRUE(order.ExecutesBefore(z, dot1));
EXPECT_TRUE(order.ExecutesBefore(z, dot2));
EXPECT_TRUE(order.ExecutesBefore(dot1, dot2));
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest, SequentialAdd) {
HloComputation::Builder builder("entry_computation");
HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
0, f32_2x2_, "x"));
HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
1, f32_2x2_, "y"));
HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter(
2, f32_2x2_, "z"));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, x, y));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, y, z));
HloInstruction* add3 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, add2));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build(add3));
SequentialHloOrdering order = BuildHloOrdering(module.get());
EXPECT_TRUE(order.ExecutesBefore(y, x));
EXPECT_TRUE(order.ExecutesBefore(y, add1));
EXPECT_TRUE(order.ExecutesBefore(z, add1));
EXPECT_TRUE(order.ExecutesBefore(z, add2));
EXPECT_TRUE(order.ExecutesBefore(add1, add2));
EXPECT_TRUE(order.ExecutesBefore(add2, add3));
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest, AsyncCustomCall) {
HloComputation::Builder builder("entry_computation");
HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
0, f32_2x2_, "x"));
HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
1, f32_2x2_, "y"));
HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter(
2, f32_2x2_, "z"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, x, y));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add0, y));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, z));
HloInstruction* nonblocking_call =
builder.AddInstruction(HloInstruction::CreateCustomCall(
f32_2x2_, {add0},
"nonblocking-call-start",
""));
static_cast<HloCustomCallInstruction*>(nonblocking_call)
->set_custom_call_schedule(SCHEDULE_EARLIEST);
TF_CHECK_OK(add1->AddControlDependencyTo(nonblocking_call));
HloInstruction* blocking_call =
builder.AddInstruction(HloInstruction::CreateCustomCall(
f32_2x2_, {nonblocking_call},
"blocking-call-done",
""));
static_cast<HloCustomCallInstruction*>(blocking_call)
->set_custom_call_schedule(SCHEDULE_LATEST);
HloInstruction* add3 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, add2));
HloInstruction* add4 = builder.AddInstruction(HloInstruction::CreateBinary(
f32_2x2_, HloOpcode::kAdd, add3, blocking_call));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build(add4));
SequentialHloOrdering order = BuildHloOrdering(module.get());
VLOG(2) << order.ToString();
EXPECT_TRUE(order.ExecutesBefore(add0, nonblocking_call));
EXPECT_TRUE(order.ExecutesBefore(add1, nonblocking_call));
EXPECT_TRUE(order.ExecutesBefore(nonblocking_call, add2));
EXPECT_TRUE(order.ExecutesBefore(nonblocking_call, add3));
EXPECT_TRUE(order.ExecutesBefore(nonblocking_call, add4));
EXPECT_TRUE(order.ExecutesBefore(add3, blocking_call));
EXPECT_TRUE(order.ExecutesBefore(blocking_call, add4));
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest, AsyncCollectivePermute) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
HloComputation::Builder builder("entry_computation");
HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
0, f32_2x2_, "x"));
HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
1, f32_2x2_, "y"));
HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter(
2, f32_2x2_, "z"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, x, y));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add0, y));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, z));
Shape u32_scalar = ShapeUtil::MakeShape(U32, {});
Shape collective_permute_start_shape =
ShapeUtil::MakeTupleShape({f32_2x2_, f32_2x2_});
HloInstruction* collective_permute_start =
builder.AddInstruction(HloInstruction::CreateCollectivePermuteStart(
collective_permute_start_shape, add0,
{{0, 1}}, std::nullopt));
TF_CHECK_OK(add1->AddControlDependencyTo(collective_permute_start));
HloInstruction* collective_permute_done = builder.AddInstruction(
HloInstruction::CreateUnary(f32_2x2_, HloOpcode::kCollectivePermuteDone,
collective_permute_start));
HloInstruction* add3 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, add2));
HloInstruction* add4 = builder.AddInstruction(HloInstruction::CreateBinary(
f32_2x2_, HloOpcode::kAdd, add3, collective_permute_done));
module->AddEntryComputation(builder.Build(add4));
SequentialHloOrdering order = BuildHloOrdering(module.get());
VLOG(2) << order.ToString();
EXPECT_TRUE(order.ExecutesBefore(add0, collective_permute_start));
EXPECT_TRUE(order.ExecutesBefore(add1, collective_permute_start));
EXPECT_TRUE(order.ExecutesBefore(collective_permute_start, add2));
EXPECT_TRUE(order.ExecutesBefore(collective_permute_start, add3));
EXPECT_TRUE(order.ExecutesBefore(collective_permute_start, add4));
EXPECT_TRUE(order.ExecutesBefore(add3, collective_permute_done));
EXPECT_TRUE(order.ExecutesBefore(collective_permute_done, add4));
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest, LHSCostModel) {
const char* hlo_text = R"(
HloModule AsyncAR
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY ar {
p0 = f32[32] parameter(0)
p1 = f32[32, 32] parameter(1)
p2 = f32[32, 32] parameter(2)
p3 = f32[32] parameter(3)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
dot1 = f32[32,32]{1,0} custom-call(dot0, p2), custom_call_target="__cublas$gemm"
dot2 = f32[32,32]{1,0} custom-call(dot1, p2), custom_call_target="__cublas$gemm"
dot3 = f32[32,32]{1,0} custom-call(dot2, p2), custom_call_target="__cublas$gemm"
dot4 = f32[32,32]{1,0} custom-call(dot3, p2), custom_call_target="__cublas$gemm"
dot5 = f32[32,32]{1,0} custom-call(dot4, p2), custom_call_target="__cublas$gemm"
dot6 = f32[32,32]{1,0} custom-call(dot5, p2), custom_call_target="__cublas$gemm"
ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op
ar-done = f32[32] all-reduce-done(ar-start)
ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op
ar-done1 = f32[32] all-reduce-done(ar-start1)
add0 = f32[32,32] add(dot0, dot1)
add1 = f32[32,32] add(add0, dot2)
add2 = f32[32,32] add(add1, dot3)
add3 = f32[32,32] add(add2, dot4)
add4 = f32[32,32] add(add3, dot5)
add5 = f32[32,32] add(add4, dot6)
ROOT t = (f32[32], f32[32], f32[32,32]) tuple(ar-done, ar-done1, add5)
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
hlo_text, GetModuleConfig(true)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
HloComputation* entry = module->entry_computation();
std::vector<int64_t> count_between_pairs;
bool in_between = false;
for (const HloInstruction* inst :
order.SequentialOrder(*entry)->instructions()) {
if (inst->opcode() == HloOpcode::kAllReduceStart) {
in_between = true;
count_between_pairs.push_back(0);
} else if (inst->opcode() == HloOpcode::kAllReduceDone) {
in_between = false;
} else if (in_between && inst->opcode() == HloOpcode::kCustomCall) {
count_between_pairs.back()++;
}
}
EXPECT_EQ(count_between_pairs.size(), 2);
EXPECT_GT(count_between_pairs[0], 0);
EXPECT_GT(count_between_pairs[1], 0);
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest,
ScheduleGpuModuleWithMemorySchedulerReturnsPeakMemoryBytes) {
absl::string_view kHloText = R"(
HloModule m
ENTRY ar {
p0 = f32[32,32] parameter(0)
p1 = f32[32,32] parameter(1)
ROOT _ = f32[32,32]{1,0} custom-call(p0, p1),
custom_call_target="__cublas$gemm"
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
kHloText, GetModuleConfig(true)));
int64_t pointer_size =
dynamic_cast<GpuCompiler*>(backend().compiler())->GetPointerSize();
int64_t peak_memory_bytes = -1;
TF_ASSERT_OK_AND_ASSIGN(auto schedule,
ScheduleGpuModuleWithMemoryScheduler(
module.get(), pointer_size, &peak_memory_bytes));
EXPECT_GT(peak_memory_bytes, 0);
}
TEST_F(GpuHloScheduleTest, LHSCostModelCostlyAR) {
const char* hlo_text = R"(
HloModule AsyncAR
apply_op {
x = bf16[] parameter(0)
y = bf16[] parameter(1)
ROOT apply_op = bf16[] add(x, y)
}
ENTRY ar {
p0 = bf16[32505856] parameter(0)
p1 = f32[32, 32] parameter(1)
p2 = f32[32, 32] parameter(2)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
dot1 = f32[32,32]{1,0} custom-call(dot0, p2), custom_call_target="__cublas$gemm"
dot2 = f32[32,32]{1,0} custom-call(dot1, p2), custom_call_target="__cublas$gemm"
dot3 = f32[32,32]{1,0} custom-call(dot2, p2), custom_call_target="__cublas$gemm"
dot4 = f32[32,32]{1,0} custom-call(dot3, p2), custom_call_target="__cublas$gemm"
dot5 = f32[32,32]{1,0} custom-call(dot4, p2), custom_call_target="__cublas$gemm"
dot6 = f32[32,32]{1,0} custom-call(dot5, p2), custom_call_target="__cublas$gemm"
ar-start = bf16[32505856] all-reduce-start(p0), to_apply=apply_op
ar-done = bf16[32505856] all-reduce-done(ar-start)
ROOT t = (bf16[32505856], f32[32,32]) tuple(ar-done, dot6)
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
hlo_text, GetModuleConfig(true)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
HloComputation* entry = module->entry_computation();
std::vector<int64_t> count_between_pairs;
bool in_between = false;
for (const HloInstruction* inst :
order.SequentialOrder(*entry)->instructions()) {
if (inst->opcode() == HloOpcode::kAllReduceStart) {
in_between = true;
count_between_pairs.push_back(0);
} else if (inst->opcode() == HloOpcode::kAllReduceDone) {
in_between = false;
} else if (in_between && inst->opcode() == HloOpcode::kCustomCall) {
count_between_pairs.back()++;
}
}
EXPECT_EQ(count_between_pairs.size(), 1);
EXPECT_EQ(count_between_pairs[0], 7);
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest, ProfileGuidedCostModel) {
const char* hlo_text = R"(
HloModule AsyncAR
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY ar {
p0 = f32[32] parameter(0)
p1 = f32[32, 32] parameter(1)
p2 = f32[32, 32] parameter(2)
p3 = f32[32] parameter(3)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
dot1 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
add0 = f32[32,32] add(dot0, dot1)
ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op
ar-done = f32[32] all-reduce-done(ar-start)
ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op
ar-done1 = f32[32] all-reduce-done(ar-start1)
ROOT t = (f32[32], f32[32], f32[32,32]) tuple(ar-done, ar-done1, add0)
})";
struct SubTest {
std::string profile;
std::string target_start, target_done;
};
std::vector<SubTest> subtests;
const std::string ar_long_latency_proto_text = R"pb(
costs { name: "dot0" cost_us: 100.0 }
costs { name: "dot1" cost_us: 100.0 }
costs { name: "add0" cost_us: 10.0 }
costs { name: "ar-start" cost_us: 1000.0 }
costs { name: "ar-start1" cost_us: 10.0 }
)pb";
subtests.push_back({ar_long_latency_proto_text, "ar-start", "ar-done"});
const std::string ar1_long_latency_proto_text = R"pb(
costs { name: "dot0" cost_us: 100.0 }
costs { name: "dot1" cost_us: 100.0 }
costs { name: "add0" cost_us: 10.0 }
costs { name: "ar-start" cost_us: 10.0 }
costs { name: "ar-start1" cost_us: 1000.0 }
)pb";
tensorflow::profiler::ProfiledInstructionsProto profile;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
ar1_long_latency_proto_text, &profile));
std::string ar1_long_latency_proto_binary = profile.SerializeAsString();
subtests.push_back({profile.SerializeAsString(), "ar-start1", "ar-done1"});
for (const SubTest& subtest : subtests) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
hlo_text, GetModuleConfig(true,
true,
subtest.profile)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
HloComputation* entry = module->entry_computation();
bool between_target_collective_pair = false;
for (const HloInstruction* inst :
order.SequentialOrder(*entry)->instructions()) {
if (inst->name() == subtest.target_start) {
between_target_collective_pair = true;
} else if (inst->name() == subtest.target_done) {
between_target_collective_pair = false;
} else if (inst->opcode() == HloOpcode::kDot ||
inst->opcode() == HloOpcode::kAdd) {
EXPECT_TRUE(between_target_collective_pair);
}
}
}
}
TEST_F(GpuHloScheduleTest, ProfileGuidedCostModelFailsWithIncompleteProfile) {
const absl::string_view kHloString = R"(
HloModule m
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY ar {
p0 = f32[32] parameter(0)
p1 = f32[32,32] parameter(1)
p2 = f32[32,32] parameter(2)
p3 = f32[32] parameter(3)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
dot1 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
add0 = f32[32,32] add(dot0, dot1)
ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op
ar-done = f32[32] all-reduce-done(ar-start)
ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op
ar-done1 = f32[32] all-reduce-done(ar-start1)
ROOT t = (f32[32],f32[32],f32[32,32]) tuple(ar-done, ar-done1, add0)
})";
const absl::string_view kProfile = R"pb(
costs { name: "dot0" cost_us: 100.0 }
costs { name: "add0" cost_us: 10.0 }
costs { name: "ar-start" cost_us: 1000.0 }
)pb";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
kHloString, GetModuleConfig(true,
true,
kProfile)));
HloModuleConfig config(module->config());
DebugOptions dboptions(config.debug_options());
dboptions.set_xla_gpu_enable_pgle_accuracy_checker(true);
config.set_debug_options(dboptions);
module->set_config(config);
EXPECT_THAT(ScheduleGpuModule(
module.get(), 8,
backend().default_stream_executor()->GetDeviceDescription())
.status(),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST_F(
GpuHloScheduleTest,
ProfileGuidedCostModelDoesNotFailWithIncompleteProfileIfAccuracyCheckerIsDisabled) {
const absl::string_view kHloString = R"(
HloModule m
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY ar {
p0 = f32[32] parameter(0)
p1 = f32[32,32] parameter(1)
p2 = f32[32,32] parameter(2)
p3 = f32[32] parameter(3)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
dot1 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
add0 = f32[32,32] add(dot0, dot1)
ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op
ar-done = f32[32] all-reduce-done(ar-start)
ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op
ar-done1 = f32[32] all-reduce-done(ar-start1)
ROOT t = (f32[32],f32[32],f32[32,32]) tuple(ar-done, ar-done1, add0)
})";
const absl::string_view kProfile = R"pb(
costs { name: "dot0" cost_us: 100.0 }
costs { name: "add0" cost_us: 10.0 }
costs { name: "ar-start" cost_us: 1000.0 }
)pb";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
kHloString, GetModuleConfig(true,
true,
kProfile)));
module->mutable_config().mutable_debug_options().add_xla_disable_hlo_passes(
"pgle-accuracy-checker");
TF_EXPECT_OK(ScheduleGpuModule(
module.get(), 8,
backend().default_stream_executor()->GetDeviceDescription())
.status());
}
TEST_F(GpuHloScheduleTest, ProfileGuidedCostModelWithRematData) {
const char* hlo_text = R"(
HloModule AsyncAR
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY ar {
p0 = f32[32] parameter(0)
p1 = f32[32, 32] parameter(1)
p2 = f32[32, 32] parameter(2)
p3 = f32[32] parameter(3)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
dot1 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
add0 = f32[32,32] add(dot0, dot1)
ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op
ar-done = f32[32] all-reduce-done(ar-start)
ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op
ar-done1 = f32[32] all-reduce-done(ar-start1)
ROOT t = (f32[32], f32[32], f32[32,32]) tuple(ar-done, ar-done1, add0)
})";
const std::string ar_long_latency_proto_text = R"pb(
costs { name: "dot0" cost_us: 100.0 }
costs { name: "dot1" cost_us: 100.0 }
costs { name: "add0" cost_us: 10.0 }
costs { name: "ar-start" cost_us: 1.0 }
costs { name: "ar-start1" cost_us: 1.0 }
costs { name: "ar-start.remat100" cost_us: 2000.0 }
)pb";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
hlo_text,
GetModuleConfig(true,
true,
ar_long_latency_proto_text)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
HloComputation* entry = module->entry_computation();
bool between_target_collective_pair = false;
for (const HloInstruction* inst :
order.SequentialOrder(*entry)->instructions()) {
if (inst->name() == "ar-start") {
between_target_collective_pair = true;
} else if (inst->name() == "ar-done") {
between_target_collective_pair = false;
} else if (inst->opcode() == HloOpcode::kDot ||
inst->opcode() == HloOpcode::kAdd) {
EXPECT_TRUE(between_target_collective_pair);
}
}
}
TEST_F(GpuHloScheduleTest, LHSSendRecv) {
const char* hlo_text = R"(
HloModule test
while_cond {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(25)
ROOT cond_result = pred[] compare(count, ub), direction=LT
}
while_body {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
send-data = get-tuple-element(%param), index=1
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}}"
}
send = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}}"
}
recv-done = (f32[1, 1024, 1024], token[]) recv-done(recv), channel_id=1
send-done = token[] send-done(send), channel_id=1
recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done), index=0
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
replica = u32[] replica-id()
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
p = f32[1, 1024, 1024] broadcast(conv), dimensions={}
b = f32[1, 1024, 1024] add(p, recv-data)
c = f32[1, 1024, 1024] multiply(b, b)
d = f32[1, 1024, 1024] tan(c)
s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
ROOT result = (u32[], f32[1, 1024, 1024]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
while_init = (u32[], f32[1, 1024, 1024]) tuple(c0, init)
while_result = (u32[], f32[1, 1024, 1024]) while(while_init),
body=while_body, condition=while_cond
ROOT entry_result = f32[1, 1024, 1024] get-tuple-element(while_result), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
hlo_text, GetModuleConfig(true)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
HloComputation* while_body = module->GetComputationWithName("while_body");
const std::vector<HloInstruction*>& instruction_sequence =
order.SequentialOrder(*while_body)->instructions();
auto get_index = [&](absl::string_view hlo_name) {
return absl::c_find_if(instruction_sequence,
[hlo_name](HloInstruction* instruction) {
return instruction->name() == hlo_name;
}) -
instruction_sequence.begin();
};
EXPECT_LT(get_index("recv"), get_index("send"));
EXPECT_LT(get_index("send"), get_index("recv-done"));
EXPECT_GE(get_index("send-done") - get_index("recv-done"), 8);
EXPECT_LT(abs(get_index("send-done") - get_index("result")), 2);
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest, LHSSendRecvPairs2) {
const char* hlo_text = R"(
HloModule test
while_cond {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(25)
ROOT cond_result = pred[] compare(count, ub), direction=LT
}
while_body {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
send-data = get-tuple-element(%param), index=1
after-all-0 = token[] after-all()
recv-0 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all-0), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}}"
}
send-0 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all-0),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}}"
}
recv-done-0 = (f32[1, 1024, 1024], token[]) recv-done(recv-0), channel_id=1
send-done-0 = token[] send-done(send-0), channel_id=1
recv-data-0 = f32[1, 1024, 1024] get-tuple-element(recv-done-0), index=0
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
replica = u32[] replica-id()
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
bc1 = f32[1, 1024, 1024] broadcast(conv), dimensions={}
after-all-1 = token[] after-all()
recv-1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all-1), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{1, 0}}"
}
send-1 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all-1),
channel_id=2, frontend_attributes={
_xla_send_recv_source_target_pairs="{{1, 0}}"
}
recv-done-1 = (f32[1, 1024, 1024], token[]) recv-done(recv-1), channel_id=2
send-done-1 = token[] send-done(send-1), channel_id=2
recv-data-1 = f32[1, 1024, 1024] get-tuple-element(recv-done-1), index=0
add2 = f32[1, 1024, 1024] add(recv-data-0, bc1)
add = f32[1, 1024, 1024] add(recv-data-1, add2)
ROOT result = (u32[], f32[1, 1024, 1024]) tuple(new_count, add)
}
ENTRY test_computation {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
while_init = (u32[], f32[1, 1024, 1024]) tuple(c0, init)
while_result = (u32[], f32[1, 1024, 1024]) while(while_init),
body=while_body, condition=while_cond
ROOT entry_result = f32[1, 1024, 1024] get-tuple-element(while_result), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
hlo_text, GetModuleConfig(true,
true)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
HloComputation* while_body = module->GetComputationWithName("while_body");
const std::vector<HloInstruction*>& instruction_sequence =
order.SequentialOrder(*while_body)->instructions();
auto get_index = [&](absl::string_view hlo_name) {
return absl::c_find_if(instruction_sequence,
[hlo_name](HloInstruction* instruction) {
return instruction->name() == hlo_name;
}) -
instruction_sequence.begin();
};
EXPECT_TRUE(HasValidFingerprint(module.get()));
EXPECT_LT(get_index("recv-1"), get_index("send-1"));
EXPECT_LT(get_index("send-1"), get_index("recv-done-1"));
EXPECT_GT(get_index("send-done-1"), get_index("send-1"));
EXPECT_LT(get_index("send-done-1"), get_index("recv-0"));
EXPECT_LT(abs(get_index("send-done-0") - get_index("result")), 2);
}
TEST_F(GpuHloScheduleTest, LHSSendRecvAllReduce) {
const char* hlo_text = R"(
HloModule test
add (x: f32[], y: f32[]) -> f32[] {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(f32[] x, f32[] y)
}
while_cond {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(25)
ROOT cond_result = pred[] compare(count, ub), direction=LT
}
while_body {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
send-data = get-tuple-element(%param), index=1
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}}"
}
send = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}}"
}
recv-done = (f32[1, 1024, 1024], token[]) recv-done(recv), channel_id=1
send-done = token[] send-done(send), channel_id=1
recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done), index=0
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
replica = u32[] replica-id()
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
p = f32[1, 1024, 1024] broadcast(conv), dimensions={}
b = f32[1, 1024, 1024] add(p, recv-data)
c = f32[1, 1024, 1024] multiply(b, b)
d = f32[1, 1024, 1024] tan(c)
s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
all-reduce-start = f32[1, 1024, 1024] all-reduce-start(f32[1, 1024, 1024] p),
replica_groups={{0,1}}, to_apply=add, backend_config={"collective_backend_config":{"is_sync":false}}
all-reduce-done = f32[1, 1024, 1024] all-reduce-done(f32[1, 1024, 1024] all-reduce-start)
new-data = f32[1, 1024, 1024] add(s, all-reduce-done)
ROOT result = (u32[], f32[1, 1024, 1024]) tuple(new_count, new-data)
}
ENTRY test_computation {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
while_init = (u32[], f32[1, 1024, 1024]) tuple(c0, init)
while_result = (u32[], f32[1, 1024, 1024]) while(while_init),
body=while_body, condition=while_cond
ROOT entry_result = f32[1, 1024, 1024] get-tuple-element(while_result), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
hlo_text, GetModuleConfig(true,
true)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
HloComputation* while_body = module->GetComputationWithName("while_body");
const std::vector<HloInstruction*>& instruction_sequence =
order.SequentialOrder(*while_body)->instructions();
auto get_index = [&](absl::string_view hlo_name) {
return absl::c_find_if(instruction_sequence,
[hlo_name](HloInstruction* instruction) {
return instruction->name() == hlo_name;
}) -
instruction_sequence.begin();
};
EXPECT_LT(get_index("recv"), get_index("send"));
EXPECT_LT(get_index("send"), get_index("recv-done"));
EXPECT_GE(get_index("send-done") - get_index("recv-done"), 3);
EXPECT_TRUE(get_index("send-done") < get_index("all-reduce-start") ||
get_index("recv") > get_index("all-reduce-start"));
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
TEST_F(GpuHloScheduleTest, LHSSendRecvPipelined1) {
const char* hlo_text = R"(
HloModule test
while_cond {
param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(25)
ROOT cond-result = pred[] compare(count, ub), direction=LT
}
while_body {
param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
recv-done.1.q = (f32[1,1024,1024], token[]) get-tuple-element(param), index=1
recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done.1.q), index=0
c1 = u32[] constant(1)
new-count = u32[] add(count, c1)
replica = u32[] replica-id()
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
p = f32[1, 1024, 1024] broadcast(conv), dimensions={}
b = f32[1, 1024, 1024] add(p, recv-data)
c = f32[1, 1024, 1024] multiply(b, b)
d = f32[1, 1024, 1024] tan(c)
s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
send-data = f32[1, 1024, 1024] add(c, s)
after-all.1 = token[] after-all()
send.1 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all.1),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
recv.1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.1), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
recv-done.1 = (f32[1,1024,1024], token[]) recv-done(recv.1), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.1 = token[] send-done(send.1), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
ROOT body-result = (u32[], (f32[1,1024,1024], token[]), token[])
tuple(new-count, recv-done.1, send-done.1)
}
ENTRY main {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
after-all.2 = token[] after-all()
recv.2 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.2), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
send.2 = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all.2), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
recv-done.2 = (f32[1,1024,1024], token[]) recv-done(recv.2), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.2 = token[] send-done(send.2), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
while-init = (u32[], (f32[1,1024,1024], token[]), token[])
tuple(c0, recv-done.2, send-done.2)
while-result = (u32[], (f32[1,1024,1024], token[]), token[])
while(while-init),
body=while_body, condition=while_cond,
backend_config={"known_trip_count":{"n":"25"}}
recv-done.2.q = (f32[1,1024,1024], token[]) get-tuple-element(while-result), index=1
ROOT entry-result = f32[1, 1024, 1024] get-tuple-element(recv-done.2.q), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
hlo_text, GetModuleConfig(true,
true)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
const std::vector<HloInstruction*>& while_body =
order.SequentialOrder(*module->GetComputationWithName("while_body"))
->instructions();
const std::vector<HloInstruction*>& main =
order.SequentialOrder(*module->GetComputationWithName("main"))
->instructions();
auto get_index =
[](absl::string_view hlo_name,
const std::vector<HloInstruction*>& instruction_sequence) {
return absl::c_find_if(instruction_sequence,
[hlo_name](HloInstruction* instruction) {
return instruction->name() == hlo_name;
}) -
instruction_sequence.begin();
};
EXPECT_TRUE(HasValidFingerprint(module.get()));
EXPECT_EQ(get_index("recv.2", main) + 1, get_index("send.2", main));
EXPECT_LT(get_index("send.2", main), get_index("recv-done.2", main));
EXPECT_LT(get_index("recv-done.2", main), get_index("send-done.2", main));
EXPECT_LT(get_index("send-done.2", main), get_index("while-result", main));
EXPECT_EQ(get_index("recv.1", while_body) + 1,
get_index("send.1", while_body));
EXPECT_LT(get_index("send.1", while_body),
get_index("recv-done.1", while_body));
EXPECT_LT(get_index("recv-done.1", while_body),
get_index("send-done.1", while_body));
}
TEST_F(GpuHloScheduleTest, LHSSendRecvPipelined2) {
const char* hlo_text = R"(
HloModule test
while_cond {
param = (u32[], (f32[1,1024,1024], token[]), token[],
(f32[1,1024,1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(25)
ROOT cond-result = pred[] compare(count, ub), direction=LT
}
while_body {
param = (u32[], (f32[1,1024,1024], token[]), token[],
(f32[1,1024,1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
recv-done.0.q = (f32[1,1024,1024], token[]) get-tuple-element(param), index=1
recv-data.0 = f32[1, 1024, 1024] get-tuple-element(recv-done.0.q), index=0
recv-done.1.q = (f32[1,1024,1024], token[]) get-tuple-element(param), index=3
recv-data.1 = f32[1, 1024, 1024] get-tuple-element(recv-done.1.q), index=0
replica = u32[] replica-id()
constant0 = u32[] constant(0)
compare0 = pred[] compare(replica, constant0), direction=EQ
compare = pred[1, 1024, 1024] broadcast(compare0), dimensions={}
recv-data = f32[1, 1024, 1024] select(compare, recv-data.0, recv-data.1)
c1 = u32[] constant(1)
new-count = u32[] add(count, c1)
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
p = f32[1, 1024, 1024] broadcast(conv), dimensions={}
b = f32[1, 1024, 1024] add(p, recv-data)
c = f32[1, 1024, 1024] multiply(b, b)
d = f32[1, 1024, 1024] tan(c)
s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
send-data = f32[1, 1024, 1024] add(c, s)
after-all.0 = token[] after-all()
send.0 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all.0),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
recv.0 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.0), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
recv-done.0 = (f32[1,1024,1024], token[]) recv-done(recv.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.0 = token[] send-done(send.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
after-all.1 = token[] after-all()
send.1 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all.1),
channel_id=2, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}",
_xla_send_recv_pipeline="1"
}
recv.1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.1), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}",
_xla_send_recv_pipeline="1"
}
recv-done.1 = (f32[1,1024,1024], token[]) recv-done(recv.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
send-done.1 = token[] send-done(send.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
ROOT body-result = (u32[], (f32[1,1024,1024], token[]), token[],
(f32[1,1024,1024], token[]), token[])
tuple(new-count, recv-done.0, send-done.0, recv-done.1, send-done.1)
}
ENTRY main {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
after-all.2 = token[] after-all()
recv.2 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.2), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
send.2 = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all.2), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
recv-done.2 = (f32[1,1024,1024], token[]) recv-done(recv.2), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.2 = token[] send-done(send.2), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
after-all.3 = token[] after-all()
recv.3 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.3), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}",
_xla_send_recv_pipeline="1"
}
send.3 = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all.3), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}",
_xla_send_recv_pipeline="1"
}
recv-done.3 = (f32[1,1024,1024], token[]) recv-done(recv.3), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
send-done.3 = token[] send-done(send.3), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
while-init = (u32[], (f32[1,1024,1024], token[]), token[],
(f32[1,1024,1024], token[]), token[]) tuple(c0, recv-done.2, send-done.2, recv-done.3, send-done.3)
while-result = (u32[], (f32[1,1024,1024], token[]), token[],
(f32[1,1024,1024], token[]), token[]) while(while-init),
body=while_body, condition=while_cond,
backend_config={"known_trip_count":{"n":"25"}}
recv-done.2.q = (f32[1,1024,1024], token[]) get-tuple-element(while-result), index=1
recv-data.2 = f32[1, 1024, 1024] get-tuple-element(recv-done.2.q), index=0
recv-done.3.q = (f32[1,1024,1024], token[]) get-tuple-element(while-result), index=3
recv-data.3 = f32[1, 1024, 1024] get-tuple-element(recv-done.3.q), index=0
replica = u32[] replica-id()
constant0 = u32[] constant(0)
compare0 = pred[] compare(replica, constant0), direction=EQ
compare = pred[1, 1024, 1024] broadcast(compare0), dimensions={}
ROOT entry-result = f32[1, 1024, 1024] select(compare, recv-data.2, recv-data.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
          hlo_text, GetModuleConfig(true, true)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
const std::vector<HloInstruction*>& while_body =
order.SequentialOrder(*module->GetComputationWithName("while_body"))
->instructions();
const std::vector<HloInstruction*>& main =
order.SequentialOrder(*module->GetComputationWithName("main"))
->instructions();
auto get_index =
[](absl::string_view hlo_name,
const std::vector<HloInstruction*>& instruction_sequence) {
return absl::c_find_if(instruction_sequence,
[hlo_name](HloInstruction* instruction) {
return instruction->name() == hlo_name;
}) -
instruction_sequence.begin();
};
EXPECT_TRUE(HasValidFingerprint(module.get()));
EXPECT_EQ(get_index("recv.2", main) + 1, get_index("send.2", main));
EXPECT_LT(get_index("send.2", main), get_index("recv.3", main));
EXPECT_EQ(get_index("recv.3", main) + 1, get_index("send.3", main));
EXPECT_LT(get_index("send.3", main), get_index("recv-done.2", main));
EXPECT_LT(get_index("recv-done.2", main), get_index("recv-done.3", main));
EXPECT_LT(get_index("recv-done.3", main), get_index("send-done.2", main));
EXPECT_LT(get_index("send-done.2", main), get_index("send-done.3", main));
EXPECT_LT(get_index("send-done.3", main), get_index("while-result", main));
EXPECT_EQ(get_index("recv.0", while_body) + 1,
get_index("send.0", while_body));
EXPECT_LT(get_index("send.0", while_body), get_index("recv.1", while_body));
EXPECT_EQ(get_index("recv.1", while_body) + 1,
get_index("send.1", while_body));
EXPECT_LT(get_index("send.1", while_body),
get_index("recv-done.0", while_body));
EXPECT_LT(get_index("recv-done.0", while_body),
get_index("recv-done.1", while_body));
EXPECT_LT(get_index("recv-done.1", while_body),
get_index("send-done.0", while_body));
EXPECT_LT(get_index("send-done.0", while_body),
get_index("send-done.1", while_body));
}
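// SkipAlreadyScheduled: running ScheduleGpuModule on a module that is already
// marked is_scheduled=true should succeed without error, leaving the existing
// schedule in place rather than recomputing it.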
TEST_F(GpuHloScheduleTest, SkipAlreadyScheduled) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m, is_scheduled=true
fused_computation {
param_0 = f32[1024,1024]{1,0} parameter(0)
ROOT exponential.1 = f32[1024,1024]{1,0} exponential(param_0)
}
fused_computation.1 {
param_0.1 = f32[1024,1024]{1,0} parameter(0)
ROOT negate.1 = f32[1024,1024]{1,0} negate(param_0.1)
}
ENTRY e {
p = f32[1024,1024]{1,0} parameter(0)
wrapped_negate = f32[1024,1024]{1,0} fusion(p), kind=kLoop, calls=fused_computation.1
wrapped_exponential = f32[1024,1024]{1,0} fusion(p), kind=kLoop, calls=fused_computation
ROOT t = (f32[1024,1024]{1,0}, f32[1024,1024]{1,0}) tuple(wrapped_exponential, wrapped_negate)
})")
.value();
TF_CHECK_OK(ScheduleGpuModule(
module.get(), 8,
backend().default_stream_executor()->GetDeviceDescription())
.status());
EXPECT_TRUE(*RunFileCheck(module->ToString(), R"(
)"));
}
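// ProfileGuidedCostModelWithForceEarliestSchedule: the profile below makes
// ar-start very expensive. dot0 carries
// backend_config={"force_earliest_schedule":true} and is therefore expected
// before ar-start, while dot1 is left free to overlap with the collective
// (scheduled after ar-start but before ar-done).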
TEST_F(GpuHloScheduleTest, ProfileGuidedCostModelWithForceEarliestSchedule) {
const char* hlo_text = R"(
HloModule AsyncAR
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY main {
p0 = f32[32] parameter(0)
p1 = f32[32, 32] parameter(1)
p2 = f32[32, 32] parameter(2)
p3 = f32[32] parameter(3)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm", backend_config={"force_earliest_schedule":true}
dot1 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
add0 = f32[32,32] add(dot0, dot1)
ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op
ar-done = f32[32] all-reduce-done(ar-start)
ROOT t = (f32[32], f32[32,32]) tuple(ar-done, add0)
})";
const std::string ar_long_latency_proto_text = R"pb(
costs { name: "dot0" cost_us: 100.0 }
costs { name: "dot1" cost_us: 100.0 }
costs { name: "add0" cost_us: 10.0 }
costs { name: "ar-start" cost_us: 1000.0 }
)pb";
tensorflow::profiler::ProfiledInstructionsProto profile;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
ar_long_latency_proto_text, &profile));
std::string ar_long_latency_proto_binary = profile.SerializeAsString();
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
hlo_text,
          GetModuleConfig(true, false, ar_long_latency_proto_binary)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
const std::vector<HloInstruction*>& main =
order.SequentialOrder(*module->GetComputationWithName("main"))
->instructions();
auto get_index =
[](absl::string_view hlo_name,
const std::vector<HloInstruction*>& instruction_sequence) {
return absl::c_find_if(instruction_sequence,
[hlo_name](HloInstruction* instruction) {
return instruction->name() == hlo_name;
}) -
instruction_sequence.begin();
};
EXPECT_LT(get_index("dot0", main), get_index("ar-start", main));
EXPECT_GT(get_index("dot1", main), get_index("ar-start", main));
EXPECT_LT(get_index("dot1", main), get_index("ar-done", main));
}
class GpuHloScheduleParameterizedTest
: public GpuHloScheduleTest,
public ::testing::WithParamInterface<bool> {};
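// The boolean test parameter selects whether the latency-hiding scheduler is
// enabled (see use_latency_hiding_scheduler below). AsyncAllReduce builds an
// all-reduce-start/done pair by hand, adds a control edge add1 -> start, and
// checks that the resulting order respects both data and control dependencies
// while letting independent work (add2, add3) overlap with the collective.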
TEST_P(GpuHloScheduleParameterizedTest, AsyncAllReduce) {
HloComputation::Builder reduction_builder("add");
HloInstruction* x0 =
reduction_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeScalarShape(F32),
"x"));
HloInstruction* y0 =
reduction_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeScalarShape(F32),
"y"));
HloInstruction* add =
reduction_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeScalarShape(F32), HloOpcode::kAdd, x0, y0));
const bool use_latency_hiding_scheduler = GetParam();
std::unique_ptr<HloModule> module =
CreateNewVerifiedModule(use_latency_hiding_scheduler);
HloComputation* reduction_computation =
module->AddEmbeddedComputation(reduction_builder.Build(add));
HloComputation::Builder builder("entry_computation");
HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
0, f32_2x2_, "x"));
HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
1, f32_2x2_, "y"));
HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter(
2, f32_2x2_, "z"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, x, y));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add0, y));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, z));
Shape all_reduce_start_shape =
ShapeUtil::MakeTupleShape({f32_2x2_, f32_2x2_});
HloInstruction* all_reduce_start =
builder.AddInstruction(HloInstruction::CreateAllReduceStart(
all_reduce_start_shape, {add0}, reduction_computation,
CollectiveDeviceList(), false,
std::nullopt, true));
TF_CHECK_OK(add1->AddControlDependencyTo(all_reduce_start));
HloInstruction* all_reduce_done =
builder.AddInstruction(HloInstruction::CreateUnary(
f32_2x2_, HloOpcode::kAllReduceDone, all_reduce_start));
HloInstruction* add3 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, add2));
HloInstruction* add4 = builder.AddInstruction(HloInstruction::CreateBinary(
f32_2x2_, HloOpcode::kAdd, add3, all_reduce_done));
module->AddEntryComputation(builder.Build(add4));
SequentialHloOrdering order = BuildHloOrdering(module.get());
VLOG(2) << order.ToString();
EXPECT_TRUE(order.ExecutesBefore(add0, all_reduce_start));
EXPECT_TRUE(order.ExecutesBefore(add1, all_reduce_start));
EXPECT_TRUE(order.ExecutesBefore(all_reduce_start, add2));
EXPECT_TRUE(order.ExecutesBefore(all_reduce_start, add3));
EXPECT_TRUE(order.ExecutesBefore(all_reduce_start, add4));
EXPECT_TRUE(order.ExecutesBefore(add3, all_reduce_done));
EXPECT_TRUE(order.ExecutesBefore(all_reduce_done, add4));
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
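// LHSResourceModel: counts how many async collectives are in flight at once in
// the final schedule. With the GPU async tracker enabled, the all-reduce and
// all-gather are expected to be serialized (at most one in flight); without it
// they may overlap (up to two in flight).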
TEST_P(GpuHloScheduleParameterizedTest, LHSResourceModel) {
const char* hlo_text = R"(
HloModule AsyncModule
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY ar {
p0 = f32[32] parameter(0)
p1 = f32[32, 32] parameter(1)
p2 = f32[32, 32] parameter(2)
p3 = f32[32] parameter(3)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
dot1 = f32[32,32]{1,0} custom-call(dot0, p2), custom_call_target="__cublas$gemm"
dot2 = f32[32,32]{1,0} custom-call(dot1, p2), custom_call_target="__cublas$gemm"
dot3 = f32[32,32]{1,0} custom-call(dot2, p2), custom_call_target="__cublas$gemm"
dot4 = f32[32,32]{1,0} custom-call(dot3, p2), custom_call_target="__cublas$gemm"
dot5 = f32[32,32]{1,0} custom-call(dot4, p2), custom_call_target="__cublas$gemm"
dot6 = f32[32,32]{1,0} custom-call(dot5, p2), custom_call_target="__cublas$gemm"
ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op
ar-done = f32[32] all-reduce-done(ar-start)
%ag-start = (f32[32], f32[64]) all-gather-start(p3), dimensions={0}
%ag-done = f32[64] all-gather-done(%ag-start)
add0 = f32[32,32] add(dot0, dot1)
add1 = f32[32,32] add(add0, dot2)
add2 = f32[32,32] add(add1, dot3)
add3 = f32[32,32] add(add2, dot4)
add4 = f32[32,32] add(add3, dot5)
add5 = f32[32,32] add(add4, dot6)
ROOT t = (f32[32], f32[64], f32[32,32]) tuple(ar-done, %ag-done, add5)
})";
const bool enable_gpu_async_tracker = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(
hlo_text,
          GetModuleConfig(true, enable_gpu_async_tracker)));
SequentialHloOrdering order = BuildHloOrdering(module.get());
uint32_t in_flight = 0;
uint32_t max_in_flight = 0;
for (const HloInstruction* inst :
order.SequentialOrder(*module->entry_computation())->instructions()) {
if (hlo_query::IsAsyncCollectiveStartOp(inst)) {
in_flight++;
max_in_flight = std::max(max_in_flight, in_flight);
} else if (hlo_query::IsAsyncCollectiveDoneOp(inst)) {
in_flight--;
}
}
const uint32_t expected_max_in_flight = enable_gpu_async_tracker ? 1 : 2;
EXPECT_EQ(expected_max_in_flight, max_in_flight);
EXPECT_TRUE(HasValidFingerprint(module.get()));
}
INSTANTIATE_TEST_SUITE_P(GpuHloScheduleParameterizedTest,
GpuHloScheduleParameterizedTest, ::testing::Bool());
using GpuHloSchedulePostProcessTest = HloTestBase;
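// PostProcessAsyncCollectives: starts from an already scheduled sequence and
// checks the post-processing pass. Per the expected sequence below, the start
// of a truly asynchronous collective (ar-start) is hoisted early and its done
// is sunk past independent work, while a collective tagged is_sync (ag-start)
// keeps its done immediately after its start.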
TEST_F(GpuHloSchedulePostProcessTest, PostProcessAsyncCollectives) {
const char* hlo_text = R"(
HloModule AsyncModule, is_scheduled=true
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY ar {
p0 = f32[32] parameter(0)
p1 = f32[32] parameter(1)
ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op
add0 = f32[32] add(p0, p0)
ar-done = f32[32] all-reduce-done(ar-start)
ag-start = (f32[32], f32[64]) all-gather-start(p1), dimensions={0}, backend_config="{\"collective_backend_config\":{\"is_sync\":true}}"
add1 = f32[32] add(p1, p1)
ag-done = f32[64] all-gather-done(ag-start)
add2 = f32[32] add(add0, add1)
add3 = f32[32] add(add2, ar-done)
ROOT result = (f32[32], f32[64]) tuple(add3, ag-done)
})";
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnVerifiedModule(hlo_text, 2));
const HloInstructionSequence& input =
module->schedule().sequence(module->entry_computation());
HloInstructionSequence result = PostProcessSchedule(input);
const std::vector<std::string_view> expected_sequence = {
"p0",
"ar-start",
"p1", "add0", "add1",
"ag-start",
"ag-done", "add2",
"ar-done",
"add3", "result"};
ASSERT_EQ(expected_sequence.size(), result.size());
for (int i = 0; i < result.size(); ++i) {
EXPECT_EQ(expected_sequence[i], result.instructions()[i]->name());
}
}
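// AsyncOps: two independent fusion-start/fusion-done pairs. The expected
// opcode order below has both starts issued before either done, so the two
// async fusions can run concurrently.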
TEST_F(GpuHloScheduleTest, AsyncOps) {
const char* hlo_text = R"(
HloModule m
op1 {
p0 = f32[2,2] parameter(0)
ROOT add = f32[2,2] add(p0, p0)
}
op2 {
p0 = f32[2,2] parameter(0)
ROOT add = f32[2,2] add(p0, p0)
}
ENTRY main {
p0 = f32[2,2] parameter(0)
acc1_start = ((f32[2,2]), f32[2,2], s32[]) fusion-start(p0),
kind=kLoop, calls=op1
acc1_done = f32[2,2] fusion-done(acc1_start)
acc2_start = ((f32[2,2]), f32[2,2], s32[]) fusion-start(p0),
kind=kLoop, calls=op2
acc2_done = f32[2,2] fusion-done(acc2_start)
ROOT done = f32[2,2] add(acc1_done, acc2_done)
})";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<xla::VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text, HloModuleConfig{}));
SequentialHloOrdering order = BuildHloOrdering(module.get());
std::vector<HloOpcode> opcodes;
for (HloInstruction* instruction :
order.SequentialOrder(*module->entry_computation())->instructions()) {
opcodes.push_back(instruction->opcode());
}
EXPECT_THAT(opcodes,
ElementsAre(HloOpcode::kParameter, HloOpcode::kAsyncStart,
HloOpcode::kAsyncStart, HloOpcode::kAsyncDone,
HloOpcode::kAsyncDone, HloOpcode::kAdd));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_hlo_schedule.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_hlo_schedule_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9ed3e975-ab7f-4ee0-99bc-cac0be8541df | cpp | tensorflow/tensorflow | eigen_backward_cuboid_convolutions | tensorflow/core/kernels/eigen_backward_cuboid_convolutions.h | tensorflow/core/kernels/eigen_backward_cuboid_convolutions_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_EIGEN_BACKWARD_CUBOID_CONVOLUTIONS_H_
#define TENSORFLOW_CORE_KERNELS_EIGEN_BACKWARD_CUBOID_CONVOLUTIONS_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/kernels/eigen_cuboid_convolution.h"
namespace Eigen {
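// Backward pass of a 3D ("cuboid") convolution with respect to its input.
// For ColMajor, `kernel` is laid out as
// [out_depth, in_depth, patch_planes, patch_rows, patch_cols] and
// `output_backward` as [out_depth, out_planes, out_rows, out_cols, ...]; the
// result has shape [in_depth, in_planes, in_rows, in_cols, ...] (RowMajor uses
// the reversed order). The gradient is obtained by reversing and shuffling the
// kernel and contracting it with volume patches extracted from
// `output_backward`.
//
// A minimal usage sketch mirroring the unit tests (dimension names are
// illustrative, not part of the API):
//
//   input_backward = CuboidConvolutionBackwardInput(
//       kernel, output_backward, input_planes, input_rows, input_cols);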
template <typename OutputBackward, typename Kernel>
EIGEN_ALWAYS_INLINE static const std::conditional_t<
internal::traits<OutputBackward>::Layout == ColMajor,
TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
internal::traits<OutputBackward>::NumDimensions>,
const TensorContractionOp<
const array<
IndexPair<typename internal::traits<OutputBackward>::Index>, 1>,
const Eigen::TensorForcedEvalOp<const TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
2>,
const TensorShufflingOp<
const array<
typename internal::traits<OutputBackward>::Index, 5>,
const TensorReverseOp<const Eigen::array<bool, 5>,
const Kernel>>>>,
const TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
2>,
const TensorVolumePatchOp<Dynamic, Dynamic, Dynamic,
const OutputBackward>>>>,
TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
internal::traits<OutputBackward>::NumDimensions>,
const TensorContractionOp<
const array<
IndexPair<typename internal::traits<OutputBackward>::Index>, 1>,
const TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
2>,
const TensorVolumePatchOp<Dynamic, Dynamic, Dynamic,
const OutputBackward>>,
const Eigen::TensorForcedEvalOp<const TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
2>,
const TensorShufflingOp<
const array<
typename internal::traits<OutputBackward>::Index, 5>,
const TensorReverseOp<const Eigen::array<bool, 5>,
const Kernel>>>>>>>
CuboidConvolutionBackwardInput(
const Kernel& kernel, const OutputBackward& output_backward,
typename internal::traits<OutputBackward>::Index inputPlanes,
typename internal::traits<OutputBackward>::Index inputRows,
typename internal::traits<OutputBackward>::Index inputCols,
const DenseIndex plane_stride = 1, const DenseIndex row_stride = 1,
const DenseIndex col_stride = 1) {
typedef typename internal::traits<OutputBackward>::Index TensorIndex;
const TensorRef<const Tensor<typename internal::traits<Kernel>::Scalar,
internal::traits<Kernel>::NumDimensions,
internal::traits<Kernel>::Layout, TensorIndex>>
kern(kernel);
const TensorRef<
const Tensor<typename internal::traits<OutputBackward>::Scalar,
internal::traits<OutputBackward>::NumDimensions,
internal::traits<OutputBackward>::Layout, TensorIndex>>
out(output_backward);
EIGEN_STATIC_ASSERT(internal::traits<Kernel>::Layout ==
internal::traits<OutputBackward>::Layout,
YOU_MADE_A_PROGRAMMING_MISTAKE);
static const bool isColMajor =
(internal::traits<OutputBackward>::Layout == ColMajor);
static const int NumDims = internal::traits<OutputBackward>::NumDimensions;
const TensorIndex kernelFilters =
isColMajor ? kern.dimensions()[0] : kern.dimensions()[4];
const TensorIndex kernelChannels =
isColMajor ? kern.dimensions()[1] : kern.dimensions()[3];
const TensorIndex kernelPlanes =
isColMajor ? kern.dimensions()[2] : kern.dimensions()[2];
const TensorIndex kernelRows =
isColMajor ? kern.dimensions()[3] : kern.dimensions()[1];
const TensorIndex kernelCols =
isColMajor ? kern.dimensions()[4] : kern.dimensions()[0];
const TensorIndex outputPlanes =
isColMajor ? out.dimensions()[1] : out.dimensions()[NumDims - 2];
const TensorIndex outputRows =
isColMajor ? out.dimensions()[2] : out.dimensions()[NumDims - 3];
const TensorIndex outputCols =
isColMajor ? out.dimensions()[3] : out.dimensions()[NumDims - 4];
const TensorIndex kernelPlanesEff = kernelPlanes;
const TensorIndex kernelRowsEff = kernelRows;
const TensorIndex kernelColsEff = kernelCols;
const TensorIndex forward_pad_top_z = numext::maxi<Index>(
0,
((outputPlanes - 1) * plane_stride + kernelPlanesEff - inputPlanes) / 2);
const TensorIndex forward_pad_top = numext::maxi<Index>(
0, ((outputRows - 1) * row_stride + kernelRowsEff - inputRows) / 2);
const TensorIndex forward_pad_left = numext::maxi<Index>(
0, ((outputCols - 1) * col_stride + kernelColsEff - inputCols) / 2);
const TensorIndex padding_top_z = kernelPlanesEff - 1 - forward_pad_top_z;
const TensorIndex padding_top = kernelRowsEff - 1 - forward_pad_top;
const TensorIndex padding_left = kernelColsEff - 1 - forward_pad_left;
const TensorIndex padding_bottom_z = inputPlanes -
(outputPlanes - 1) * plane_stride - 2 -
padding_top_z + kernelPlanesEff;
const TensorIndex padding_bottom = inputRows - (outputRows - 1) * row_stride -
2 - padding_top + kernelRowsEff;
const TensorIndex padding_right = inputCols - (outputCols - 1) * col_stride -
2 - padding_left + kernelColsEff;
eigen_assert(padding_top_z >= 0);
eigen_assert(padding_top >= 0);
eigen_assert(padding_left >= 0);
eigen_assert(padding_bottom_z >= 0);
eigen_assert(padding_bottom >= 0);
eigen_assert(padding_right >= 0);
Eigen::array<bool, 5> kernel_reverse;
if (isColMajor) {
kernel_reverse[0] = false;
kernel_reverse[1] = false;
kernel_reverse[2] = true;
kernel_reverse[3] = true;
kernel_reverse[4] = true;
} else {
kernel_reverse[0] = true;
kernel_reverse[1] = true;
kernel_reverse[2] = true;
kernel_reverse[3] = false;
kernel_reverse[4] = false;
}
array<TensorIndex, 5> kernel_shuffle;
if (isColMajor) {
kernel_shuffle[0] = 0;
kernel_shuffle[1] = 2;
kernel_shuffle[2] = 3;
kernel_shuffle[3] = 4;
kernel_shuffle[4] = 1;
} else {
kernel_shuffle[0] = 3;
kernel_shuffle[1] = 0;
kernel_shuffle[2] = 1;
kernel_shuffle[3] = 2;
kernel_shuffle[4] = 4;
}
DSizes<TensorIndex, 2> kernel_dims;
if (isColMajor) {
kernel_dims[0] = kernelFilters * kernelPlanes * kernelRows * kernelCols;
kernel_dims[1] = kernelChannels;
} else {
kernel_dims[1] = kernelFilters * kernelPlanes * kernelRows * kernelCols;
kernel_dims[0] = kernelChannels;
}
DSizes<TensorIndex, 2> pre_contract_dims;
if (isColMajor) {
pre_contract_dims[0] =
kernelFilters * kernelPlanes * kernelRows * kernelCols;
pre_contract_dims[1] = inputPlanes * inputRows * inputCols;
for (int i = 4; i < NumDims; ++i) {
pre_contract_dims[1] *= out.dimension(i);
}
} else {
pre_contract_dims[1] =
kernelFilters * kernelPlanes * kernelRows * kernelCols;
pre_contract_dims[0] = inputPlanes * inputRows * inputCols;
for (int i = 0; i < NumDims - 4; ++i) {
pre_contract_dims[0] *= out.dimension(i);
}
}
array<IndexPair<TensorIndex>, 1> contract_dims;
if (isColMajor) {
contract_dims[0] = IndexPair<TensorIndex>(0, 0);
} else {
contract_dims[0] = IndexPair<TensorIndex>(1, 1);
}
DSizes<TensorIndex, NumDims> post_contract_dims;
if (isColMajor) {
post_contract_dims[0] = kernelChannels;
post_contract_dims[1] = inputPlanes;
post_contract_dims[2] = inputRows;
post_contract_dims[3] = inputCols;
for (int i = 4; i < NumDims; ++i) {
post_contract_dims[i] = out.dimension(i);
}
} else {
post_contract_dims[NumDims - 1] = kernelChannels;
post_contract_dims[NumDims - 2] = inputPlanes;
post_contract_dims[NumDims - 3] = inputRows;
post_contract_dims[NumDims - 4] = inputCols;
for (int i = 0; i < NumDims - 4; ++i) {
post_contract_dims[i] = out.dimension(i);
}
}
return choose(
Cond<internal::traits<OutputBackward>::Layout == ColMajor>(),
kernel.reverse(kernel_reverse)
.shuffle(kernel_shuffle)
.reshape(kernel_dims)
.eval()
.contract(output_backward
.extract_volume_patches(
kernelPlanes, kernelRows, kernelCols, 1, 1, 1,
plane_stride, row_stride, col_stride, padding_top_z,
padding_bottom_z, padding_top, padding_bottom,
padding_left, padding_right)
.reshape(pre_contract_dims),
contract_dims)
.reshape(post_contract_dims),
output_backward
.extract_volume_patches(kernelPlanes, kernelRows, kernelCols, 1, 1, 1,
plane_stride, row_stride, col_stride,
padding_top_z, padding_bottom_z, padding_top,
padding_bottom, padding_left, padding_right)
.reshape(pre_contract_dims)
.contract(kernel.reverse(kernel_reverse)
.shuffle(kernel_shuffle)
.reshape(kernel_dims)
.eval(),
contract_dims)
.reshape(post_contract_dims));
}
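// Backward pass of a 3D ("cuboid") convolution with respect to the kernel.
// `input` must be a 5-D tensor (the batch dimension is required, see the
// static assert below); the kernel extent and the strides are passed
// explicitly. For ColMajor the result is laid out like the forward kernel,
// [out_depth, in_depth, kernel_planes, kernel_rows, kernel_cols] (reversed
// for RowMajor).
//
// A minimal usage sketch mirroring the unit tests (dimension names are
// illustrative, not part of the API):
//
//   kernel_backward = CuboidConvolutionBackwardKernel(
//       input, output_backward, patch_planes, patch_rows, patch_cols,
//       stride_planes, stride_rows, stride_cols);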
template <typename OutputBackward, typename Input>
EIGEN_ALWAYS_INLINE static const std::conditional_t<
internal::traits<Input>::Layout == ColMajor,
const TensorReverseOp<
const Eigen::array<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const Eigen::TensorShufflingOp<
const Eigen::array<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const Eigen::TensorReshapingOp<
const Eigen::DSizes<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const TensorContractionOp<
const array<
IndexPair<typename internal::traits<Input>::Index>, 1>,
const Eigen::TensorForcedEvalOp<const TensorReshapingOp<
const DSizes<typename internal::traits<Input>::Index,
2>,
const Eigen::TensorShufflingOp<
const Eigen::array<
typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const OutputBackward>>>,
const TensorReshapingOp<
const DSizes<typename internal::traits<Input>::Index,
2>,
const TensorVolumePatchOp<
Dynamic, Dynamic, Dynamic,
const Eigen::TensorForcedEvalOp<
const Eigen::TensorShufflingOp<
const Eigen::array<
typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const Input>>>>>>>>,
const TensorReverseOp<
const Eigen::array<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const Eigen::TensorShufflingOp<
const Eigen::array<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const Eigen::TensorReshapingOp<
const Eigen::DSizes<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const TensorContractionOp<
const array<
IndexPair<typename internal::traits<Input>::Index>, 1>,
const TensorReshapingOp<
const DSizes<typename internal::traits<Input>::Index,
2>,
const TensorVolumePatchOp<
Dynamic, Dynamic, Dynamic,
const Eigen::TensorForcedEvalOp<
const Eigen::TensorShufflingOp<
const Eigen::array<
typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const Input>>>>,
const Eigen::TensorForcedEvalOp<const TensorReshapingOp<
const DSizes<typename internal::traits<Input>::Index,
2>,
const Eigen::TensorShufflingOp<
const Eigen::array<
typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const OutputBackward>>>>>>>>
CuboidConvolutionBackwardKernel(
const Input& input, const OutputBackward& output_backward,
typename internal::traits<Input>::Index kernelPlanes,
typename internal::traits<Input>::Index kernelRows,
typename internal::traits<Input>::Index kernelCols,
const DenseIndex stridePlanes = 1, const DenseIndex strideRows = 1,
const DenseIndex strideCols = 1) {
typedef typename internal::traits<Input>::Index TensorIndex;
TensorRef<Tensor<typename internal::traits<Input>::Scalar,
internal::traits<Input>::NumDimensions,
internal::traits<Input>::Layout, TensorIndex>>
in(input);
TensorRef<Tensor<typename internal::traits<OutputBackward>::Scalar,
internal::traits<OutputBackward>::NumDimensions,
internal::traits<OutputBackward>::Layout, TensorIndex>>
out(output_backward);
EIGEN_STATIC_ASSERT(internal::traits<Input>::Layout ==
internal::traits<OutputBackward>::Layout,
YOU_MADE_A_PROGRAMMING_MISTAKE);
static const bool isColMajor = (internal::traits<Input>::Layout == ColMajor);
static const int NumDims = internal::traits<Input>::NumDimensions;
EIGEN_STATIC_ASSERT(internal::traits<Input>::NumDimensions ==
internal::traits<OutputBackward>::NumDimensions,
YOU_MADE_A_PROGRAMMING_MISTAKE);
EIGEN_STATIC_ASSERT(internal::traits<Input>::NumDimensions == 5,
YOU_MADE_A_PROGRAMMING_MISTAKE);
const TensorIndex inputPlanes =
isColMajor ? in.dimension(1) : in.dimension(NumDims - 2);
const TensorIndex inputRows =
isColMajor ? in.dimension(2) : in.dimension(NumDims - 3);
const TensorIndex inputCols =
isColMajor ? in.dimension(3) : in.dimension(NumDims - 4);
const TensorIndex outputPlanes =
isColMajor ? out.dimension(1) : out.dimension(NumDims - 2);
const TensorIndex outputRows =
isColMajor ? out.dimension(2) : out.dimension(NumDims - 3);
const TensorIndex outputCols =
isColMajor ? out.dimension(3) : out.dimension(NumDims - 4);
const TensorIndex kernelFilters =
isColMajor ? out.dimension(0) : out.dimension(NumDims - 1);
const TensorIndex kernelChannels =
isColMajor ? in.dimension(0) : in.dimension(NumDims - 1);
const TensorIndex batch =
isColMajor ? in.dimension(4) : in.dimension(NumDims - 5);
const TensorIndex kernelPlanesEff = kernelPlanes;
const TensorIndex kernelRowsEff = kernelRows;
const TensorIndex kernelColsEff = kernelCols;
const TensorIndex padPlanes = numext::maxi<Index>(
0, (outputPlanes - 1) * stridePlanes + kernelPlanesEff - inputPlanes);
const TensorIndex padRows = numext::maxi<Index>(
0, (outputRows - 1) * strideRows + kernelRowsEff - inputRows);
const TensorIndex padCols = numext::maxi<Index>(
0, (outputCols - 1) * strideCols + kernelColsEff - inputCols);
const TensorIndex padding_top_z = padPlanes / 2;
const TensorIndex padding_top = padRows / 2;
const TensorIndex padding_left = padCols / 2;
const auto expanded_out_planes = (outputPlanes - 1) * stridePlanes + 1;
const auto expanded_out_rows = (outputRows - 1) * strideRows + 1;
const auto expanded_out_cols = (outputCols - 1) * strideCols + 1;
const auto padded_out_planes = inputPlanes + kernelPlanes - 1;
const auto padded_out_rows = inputRows + kernelRows - 1;
const auto padded_out_cols = inputCols + kernelCols - 1;
const auto top_pad_planes = kernelPlanes - 1 - padding_top_z;
const auto top_pad_rows = kernelRows - 1 - padding_top;
const auto left_pad_cols = kernelCols - 1 - padding_left;
const auto bottom_pad_planes =
padded_out_planes - expanded_out_planes - top_pad_planes;
const auto bottom_pad_rows =
padded_out_rows - expanded_out_rows - top_pad_rows;
const auto right_pad_cols =
padded_out_cols - expanded_out_cols - left_pad_cols;
array<TensorIndex, 5> output_backward_shuffle;
if (isColMajor) {
output_backward_shuffle = {4, 1, 2, 3, 0};
} else {
output_backward_shuffle = {4, 1, 2, 3, 0};
}
array<TensorIndex, 5> input_shuffle;
if (isColMajor) {
input_shuffle = {0, 4, 1, 2, 3};
} else {
input_shuffle = {1, 2, 3, 0, 4};
}
DSizes<TensorIndex, 2> input_dims;
if (isColMajor) {
input_dims[0] = kernelChannels;
input_dims[1] = batch * inputPlanes * inputRows * inputCols;
} else {
input_dims[1] = kernelChannels;
input_dims[0] = inputCols * inputRows * inputPlanes * batch;
}
DSizes<TensorIndex, 2> pre_contract_dims;
if (isColMajor) {
pre_contract_dims[0] = batch * inputPlanes * inputRows * inputCols;
pre_contract_dims[1] =
kernelPlanes * kernelRows * kernelCols * kernelFilters;
} else {
pre_contract_dims[1] = inputCols * inputRows * inputPlanes * batch;
pre_contract_dims[0] =
kernelFilters * kernelCols * kernelRows * kernelPlanes;
}
array<IndexPair<TensorIndex>, 1> contract_dims;
contract_dims[0] = IndexPair<TensorIndex>(1, 0);
DSizes<TensorIndex, NumDims> post_contract_dims;
if (isColMajor) {
post_contract_dims[0] = kernelChannels;
post_contract_dims[1] = kernelPlanes;
post_contract_dims[2] = kernelRows;
post_contract_dims[3] = kernelCols;
post_contract_dims[4] = kernelFilters;
} else {
post_contract_dims[0] = kernelFilters;
post_contract_dims[1] = kernelCols;
post_contract_dims[2] = kernelRows;
post_contract_dims[3] = kernelPlanes;
post_contract_dims[4] = kernelChannels;
}
array<TensorIndex, 5> kernel_shuffle;
if (isColMajor) {
kernel_shuffle = {4, 0, 1, 2, 3};
} else {
kernel_shuffle = {1, 2, 3, 4, 0};
}
array<TensorIndex, 5> kernel_reverse;
if (isColMajor) {
kernel_reverse = {false, false, true, true, true};
} else {
kernel_reverse = {true, true, true, false, false};
}
const auto the_input =
output_backward.shuffle(output_backward_shuffle).eval();
const auto the_kernel =
input.shuffle(input_shuffle).reshape(input_dims).eval();
return choose(Cond<internal::traits<Input>::Layout == ColMajor>(),
the_kernel.contract(
the_input
.extract_volume_patches(
inputPlanes, inputRows, inputCols, 1, 1, 1,
stridePlanes, strideRows, strideCols,
top_pad_planes, bottom_pad_planes, top_pad_rows,
bottom_pad_rows, left_pad_cols, right_pad_cols)
.reshape(pre_contract_dims),
contract_dims),
the_input
.extract_volume_patches(
inputPlanes, inputRows, inputCols, 1, 1, 1,
stridePlanes, strideRows, strideCols, top_pad_planes,
bottom_pad_planes, top_pad_rows, bottom_pad_rows,
left_pad_cols, right_pad_cols)
.reshape(pre_contract_dims)
.contract(the_kernel, contract_dims))
.reshape(post_contract_dims)
.shuffle(kernel_shuffle)
.reverse(kernel_reverse);
}
}
#endif | #include "tensorflow/core/kernels/eigen_backward_cuboid_convolutions.h"
#include "tensorflow/core/platform/test.h"
namespace Eigen {
namespace {
void EigenApprox(float a, float b) {
ASSERT_TRUE(std::abs(a - b) <= std::min(std::abs(a), std::abs(b)) * 1e-3);
}
static int ceil_div(int a, int b) { return (a + b - 1) / b; }
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_simple_cuboid_convolution_backward_input_valid) {
const int input_depth = 2;
const int input_planes = 5;
const int input_rows = 3;
const int input_cols = 4;
const int patch_rows = 2;
const int patch_cols = 2;
const int patch_planes = 2;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
const int output_planes = input_planes - patch_planes + 1;
const int output_depth = 5;
Tensor<float, 4> input_backward(input_depth, input_planes, input_rows,
input_cols);
Tensor<float, 5> kernel(output_depth, input_depth, patch_planes, patch_rows,
patch_cols);
Tensor<float, 4> output_backward(output_depth, output_planes, output_rows,
output_cols);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = CuboidConvolutionBackwardInput(
kernel, output_backward, input_planes, input_rows, input_cols);
EXPECT_EQ(input_backward.dimension(3), input_cols);
EXPECT_EQ(input_backward.dimension(2), input_rows);
EXPECT_EQ(input_backward.dimension(1), input_planes);
EXPECT_EQ(input_backward.dimension(0), input_depth);
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int p = 0; p < patch_planes; ++p) {
for (int od = 0; od < output_depth; ++od) {
int output_j = j - r;
int output_k = k - c;
int output_i = i - p;
if (output_i >= 0 && output_i < output_planes &&
output_j >= 0 && output_j < output_rows &&
output_k >= 0 && output_k < output_cols) {
expected +=
output_backward(od, output_i, output_j, output_k) *
kernel(od, id, p, r, c);
}
}
}
}
}
EigenApprox(input_backward(id, i, j, k), expected);
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_simple_cuboid_convolution_backward_input_valid_row_major) {
const int input_depth = 2;
const int input_planes = 5;
const int input_rows = 3;
const int input_cols = 4;
const int patch_rows = 2;
const int patch_cols = 2;
const int patch_planes = 2;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
const int output_planes = input_planes - patch_planes + 1;
const int output_depth = 5;
Tensor<float, 4, RowMajor> input_backward(input_cols, input_rows,
input_planes, input_depth);
Tensor<float, 5, RowMajor> kernel(patch_cols, patch_rows, patch_planes,
input_depth, output_depth);
Tensor<float, 4, RowMajor> output_backward(output_cols, output_rows,
output_planes, output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = CuboidConvolutionBackwardInput(
kernel, output_backward, input_planes, input_rows, input_cols);
EXPECT_EQ(input_backward.dimension(0), input_cols);
EXPECT_EQ(input_backward.dimension(1), input_rows);
EXPECT_EQ(input_backward.dimension(2), input_planes);
EXPECT_EQ(input_backward.dimension(3), input_depth);
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int p = 0; p < patch_planes; ++p) {
for (int od = 0; od < output_depth; ++od) {
int output_j = j - r;
int output_k = k - c;
int output_i = i - p;
if (output_i >= 0 && output_i < output_planes &&
output_j >= 0 && output_j < output_rows &&
output_k >= 0 && output_k < output_cols) {
expected +=
output_backward(output_k, output_j, output_i, od) *
kernel(c, r, p, id, od);
}
}
}
}
}
EigenApprox(input_backward(k, j, i, id), expected);
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_simple_cuboid_convolution_backward_input_same) {
const int input_depth = 2;
const int input_planes = 5;
const int input_rows = 3;
const int input_cols = 4;
const int patch_rows = 3;
const int patch_cols = 2;
const int patch_planes = 4;
const int output_rows = input_rows;
const int output_cols = input_cols;
const int output_planes = input_planes;
const int output_depth = 5;
Tensor<float, 4> input_backward(input_depth, input_planes, input_rows,
input_cols);
Tensor<float, 5> kernel(output_depth, input_depth, patch_planes, patch_rows,
patch_cols);
Tensor<float, 4> output_backward(output_depth, output_planes, output_rows,
output_cols);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = CuboidConvolutionBackwardInput(
kernel, output_backward, input_planes, input_rows, input_cols);
EXPECT_EQ(input_backward.dimension(3), input_cols);
EXPECT_EQ(input_backward.dimension(2), input_rows);
EXPECT_EQ(input_backward.dimension(1), input_planes);
EXPECT_EQ(input_backward.dimension(0), input_depth);
const int dz = patch_planes - 1;
const int dy = patch_rows - 1;
const int dx = patch_cols - 1;
const int forward_pad_x = dx / 2;
const int forward_pad_y = dy / 2;
const int forward_pad_z = dz / 2;
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int p = 0; p < patch_planes; ++p) {
for (int od = 0; od < output_depth; ++od) {
int output_i = i - p + forward_pad_z;
int output_j = j - r + forward_pad_y;
int output_k = k - c + forward_pad_x;
if (output_i >= 0 && output_i < output_planes &&
output_j >= 0 && output_j < output_rows &&
output_k >= 0 && output_k < output_cols) {
expected +=
output_backward(od, output_i, output_j, output_k) *
kernel(od, id, p, r, c);
}
}
}
}
}
EigenApprox(input_backward(id, i, j, k), expected);
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_simple_cuboid_convolution_backward_input_same_row_major) {
const int input_depth = 2;
const int input_planes = 5;
const int input_rows = 3;
const int input_cols = 4;
const int patch_rows = 2;
const int patch_cols = 3;
const int patch_planes = 4;
const int output_rows = input_rows;
const int output_cols = input_cols;
const int output_planes = input_planes;
const int output_depth = 5;
Tensor<float, 4, RowMajor> input_backward(input_cols, input_rows,
input_planes, input_depth);
Tensor<float, 5, RowMajor> kernel(patch_cols, patch_rows, patch_planes,
input_depth, output_depth);
Tensor<float, 4, RowMajor> output_backward(output_cols, output_rows,
output_planes, output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = CuboidConvolutionBackwardInput(
kernel, output_backward, input_planes, input_rows, input_cols);
EXPECT_EQ(input_backward.dimension(0), input_cols);
EXPECT_EQ(input_backward.dimension(1), input_rows);
EXPECT_EQ(input_backward.dimension(2), input_planes);
EXPECT_EQ(input_backward.dimension(3), input_depth);
const int dz = patch_planes - 1;
const int dy = patch_rows - 1;
const int dx = patch_cols - 1;
const int forward_pad_x = dx / 2;
const int forward_pad_y = dy / 2;
const int forward_pad_z = dz / 2;
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int p = 0; p < patch_planes; ++p) {
for (int od = 0; od < output_depth; ++od) {
int output_i = i - p + forward_pad_z;
int output_j = j - r + forward_pad_y;
int output_k = k - c + forward_pad_x;
if (output_i >= 0 && output_i < output_planes &&
output_j >= 0 && output_j < output_rows &&
output_k >= 0 && output_k < output_cols) {
expected +=
output_backward(output_k, output_j, output_i, od) *
kernel(c, r, p, id, od);
}
}
}
}
}
EigenApprox(input_backward(k, j, i, id), expected);
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_cuboid_convolution_backward_input_valid) {
const int num_batches = 13;
const int input_depth = 2;
const int input_planes = 5;
const int input_rows = 3;
const int input_cols = 4;
const int patch_rows = 2;
const int patch_cols = 2;
const int patch_planes = 2;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
const int output_planes = input_planes - patch_planes + 1;
const int output_depth = 5;
Tensor<float, 5> input_backward(input_depth, input_planes, input_rows,
input_cols, num_batches);
Tensor<float, 5> kernel(output_depth, input_depth, patch_planes, patch_rows,
patch_cols);
Tensor<float, 5> output_backward(output_depth, output_planes, output_rows,
output_cols, num_batches);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = CuboidConvolutionBackwardInput(
kernel, output_backward, input_planes, input_rows, input_cols);
EXPECT_EQ(input_backward.dimension(4), num_batches);
EXPECT_EQ(input_backward.dimension(3), input_cols);
EXPECT_EQ(input_backward.dimension(2), input_rows);
EXPECT_EQ(input_backward.dimension(1), input_planes);
EXPECT_EQ(input_backward.dimension(0), input_depth);
for (int b = 0; b < num_batches; ++b) {
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int p = 0; p < patch_planes; ++p) {
for (int od = 0; od < output_depth; ++od) {
int output_i = i - p;
int output_j = j - r;
int output_k = k - c;
if (output_i >= 0 && output_i < output_planes &&
output_j >= 0 && output_j < output_rows &&
output_k >= 0 && output_k < output_cols) {
expected +=
output_backward(od, output_i, output_j, output_k, b) *
kernel(od, id, p, r, c);
}
}
}
}
}
EigenApprox(input_backward(id, i, j, k, b), expected);
}
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_cuboid_convolution_backward_input_valid_row_major) {
const int num_batches = 13;
const int input_depth = 2;
const int input_planes = 5;
const int input_rows = 3;
const int input_cols = 4;
const int patch_rows = 2;
const int patch_cols = 2;
const int patch_planes = 2;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
const int output_planes = input_planes - patch_planes + 1;
const int output_depth = 5;
Tensor<float, 5, RowMajor> input_backward(num_batches, input_cols, input_rows,
input_planes, input_depth);
Tensor<float, 5, RowMajor> kernel(patch_cols, patch_rows, patch_planes,
input_depth, output_depth);
Tensor<float, 5, RowMajor> output_backward(
num_batches, output_cols, output_rows, output_planes, output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = CuboidConvolutionBackwardInput(
kernel, output_backward, input_planes, input_rows, input_cols);
EXPECT_EQ(input_backward.dimension(0), num_batches);
EXPECT_EQ(input_backward.dimension(1), input_cols);
EXPECT_EQ(input_backward.dimension(2), input_rows);
EXPECT_EQ(input_backward.dimension(3), input_planes);
EXPECT_EQ(input_backward.dimension(4), input_depth);
for (int b = 0; b < num_batches; ++b) {
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int p = 0; p < patch_planes; ++p) {
for (int od = 0; od < output_depth; ++od) {
int output_i = i - p;
int output_j = j - r;
int output_k = k - c;
if (output_i >= 0 && output_i < output_planes &&
output_j >= 0 && output_j < output_rows &&
output_k >= 0 && output_k < output_cols) {
expected +=
output_backward(b, output_k, output_j, output_i, od) *
kernel(c, r, p, id, od);
}
}
}
}
}
EigenApprox(input_backward(b, k, j, i, id), expected);
}
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_simple_cuboid_convolution_backward_kernel_valid) {
const int input_depth = 2;
const int input_planes = 5;
const int input_rows = 3;
const int input_cols = 4;
const int output_depth = 5;
const int patch_rows = 2;
const int patch_cols = 2;
const int patch_planes = 3;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
const int output_planes = input_planes - patch_planes + 1;
Tensor<float, 5> input(input_depth, input_planes, input_rows, input_cols,
1);
Tensor<float, 5> kernel(output_depth, input_depth, patch_planes, patch_rows,
patch_cols);
Tensor<float, 5> output_backward(output_depth, output_planes, output_rows,
output_cols, 1);
output_backward = output_backward.constant(11.0f) + output_backward.random();
input = input.constant(2.0f) + input.random();
kernel.setRandom();
kernel = CuboidConvolutionBackwardKernel(input, output_backward, patch_planes,
patch_rows, patch_cols, 1, 1, 1);
EXPECT_EQ(kernel.dimension(0), output_depth);
EXPECT_EQ(kernel.dimension(1), input_depth);
EXPECT_EQ(kernel.dimension(2), patch_planes);
EXPECT_EQ(kernel.dimension(3), patch_rows);
EXPECT_EQ(kernel.dimension(4), patch_cols);
for (int od = 0; od < output_depth; ++od) {
for (int id = 0; id < input_depth; ++id) {
for (int p = 0; p < patch_planes; ++p) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
float expected = 0.0f;
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
int output_j = j - r;
int output_k = k - c;
int output_i = i - p;
if (output_i >= 0 && output_i < output_planes &&
output_j >= 0 && output_j < output_rows &&
output_k >= 0 && output_k < output_cols) {
expected += input(id, i, j, k, 0) *
output_backward(od, output_i, output_j,
output_k, 0);
}
}
}
}
EigenApprox(kernel(od, id, p, r, c), expected);
}
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_simple_cuboid_convolution_backward_kernel_valid_row_major) {
const int input_depth = 2;
const int input_planes = 5;
const int input_rows = 3;
const int input_cols = 4;
const int output_depth = 5;
const int patch_rows = 2;
const int patch_cols = 2;
const int patch_planes = 3;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
const int output_planes = input_planes - patch_planes + 1;
  Tensor<float, 5, RowMajor> input(1, input_cols, input_rows, input_planes,
                                   input_depth);
Tensor<float, 5, RowMajor> kernel(patch_cols, patch_rows, patch_planes,
input_depth, output_depth);
Tensor<float, 5, RowMajor> output_backward(
1, output_cols, output_rows, output_planes, output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
input = input.constant(2.0f) + input.random();
kernel.setRandom();
kernel = CuboidConvolutionBackwardKernel(input, output_backward, patch_planes,
patch_rows, patch_cols, 1, 1, 1);
EXPECT_EQ(kernel.dimension(4), output_depth);
EXPECT_EQ(kernel.dimension(3), input_depth);
EXPECT_EQ(kernel.dimension(2), patch_planes);
EXPECT_EQ(kernel.dimension(1), patch_rows);
EXPECT_EQ(kernel.dimension(0), patch_cols);
for (int od = 0; od < output_depth; ++od) {
for (int id = 0; id < input_depth; ++id) {
for (int p = 0; p < patch_planes; ++p) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
float expected = 0.0f;
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
int output_j = j - r;
int output_k = k - c;
int output_i = i - p;
if (output_i >= 0 && output_i < output_planes &&
output_j >= 0 && output_j < output_rows &&
output_k >= 0 && output_k < output_cols) {
                    expected += input(0, k, j, i, id) *
                                output_backward(0, output_k, output_j,
                                                output_i, od);
}
}
}
}
EigenApprox(kernel(c, r, p, id, od), expected);
}
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_cuboid_convolution_backward_kernel_valid) {
const int num_batches = 13;
const int input_depth = 2;
const int input_planes = 5;
const int input_rows = 7;
const int input_cols = 9;
const int output_depth = 3;
const int patch_rows = 5;
const int patch_cols = 5;
const int patch_planes = 3;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
const int output_planes = input_planes - patch_planes + 1;
Tensor<float, 5> input(input_depth, input_planes, input_rows, input_cols,
num_batches);
Tensor<float, 5> kernel_backward(output_depth, input_depth, patch_planes,
patch_rows, patch_cols);
Tensor<float, 5> output_backward(output_depth, output_planes, output_rows,
output_cols, num_batches);
output_backward = output_backward.constant(11.0f) + output_backward.random();
input = input.constant(2.0f) + input.random();
kernel_backward.setRandom();
kernel_backward = CuboidConvolutionBackwardKernel(
input, output_backward, patch_planes, patch_rows, patch_cols, 1, 1, 1);
EXPECT_EQ(kernel_backward.dimension(0), output_depth);
EXPECT_EQ(kernel_backward.dimension(1), input_depth);
EXPECT_EQ(kernel_backward.dimension(2), patch_planes);
EXPECT_EQ(kernel_backward.dimension(3), patch_rows);
EXPECT_EQ(kernel_backward.dimension(4), patch_cols);
for (int od = 0; od < output_depth; ++od) {
for (int id = 0; id < input_depth; ++id) {
for (int p = 0; p < patch_planes; ++p) {
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
float expected = 0.0f;
for (int b = 0; b < num_batches; ++b) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
int output_j = j - r;
int output_k = k - c;
int output_i = i - p;
if (output_i >= 0 && output_i < output_planes &&
output_j >= 0 && output_j < output_rows &&
output_k >= 0 && output_k < output_cols) {
expected +=
input(id, i, j, k, b) *
output_backward(od, output_i, output_j, output_k, b);
}
}
}
}
}
EigenApprox(kernel_backward(od, id, p, r, c), expected);
}
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_cuboid_convolution_backward_kernel_valid_row_major) {
const int num_batches = 13;
const int input_depth = 2;
const int input_planes = 5;
const int input_rows = 7;
const int input_cols = 9;
const int output_depth = 3;
const int patch_rows = 5;
const int patch_cols = 5;
const int patch_planes = 3;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
const int output_planes = input_planes - patch_planes + 1;
Tensor<float, 5, RowMajor> input(num_batches, input_cols, input_rows,
input_planes, input_depth);
Tensor<float, 5, RowMajor> kernel_backward(
patch_cols, patch_rows, patch_planes, input_depth, output_depth);
Tensor<float, 5, RowMajor> output_backward(
num_batches, output_cols, output_rows, output_planes, output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
input = input.constant(2.0f) + input.random();
kernel_backward.setRandom();
kernel_backward = CuboidConvolutionBackwardKernel(
input, output_backward, patch_planes, patch_rows, patch_cols, 1, 1, 1);
EXPECT_EQ(kernel_backward.dimension(4), output_depth);
EXPECT_EQ(kernel_backward.dimension(3), input_depth);
EXPECT_EQ(kernel_backward.dimension(2), patch_planes);
EXPECT_EQ(kernel_backward.dimension(1), patch_rows);
EXPECT_EQ(kernel_backward.dimension(0), patch_cols);
for (int od = 0; od < output_depth; ++od) {
for (int id = 0; id < input_depth; ++id) {
for (int p = 0; p < patch_planes; ++p) {
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
float expected = 0.0f;
for (int b = 0; b < num_batches; ++b) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
int output_j = j - r;
int output_k = k - c;
int output_i = i - p;
if (output_i >= 0 && output_i < output_planes &&
output_j >= 0 && output_j < output_rows &&
output_k >= 0 && output_k < output_cols) {
expected +=
input(b, k, j, i, id) *
output_backward(b, output_k, output_j, output_i, od);
}
}
}
}
}
EigenApprox(kernel_backward(c, r, p, id, od), expected);
}
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_strided_cuboid_convolution_backward_kernel_valid) {
const int num_batches = 13;
const int input_depth = 2;
const int input_planes = 8;
const int input_rows = 7;
const int input_cols = 9;
const int output_depth = 3;
const int patch_planes = 3;
const int patch_rows = 3;
const int patch_cols = 2;
const int stride_planes = 2;
const int stride_cols = 3;
const int stride_rows = 1;
const int output_rows = ceil_div(input_rows - patch_rows + 1, stride_rows);
const int output_cols = ceil_div(input_cols - patch_cols + 1, stride_cols);
const int output_planes =
ceil_div(input_planes - patch_planes + 1, stride_planes);
Tensor<float, 5> input(input_depth, input_planes, input_rows, input_cols,
num_batches);
Tensor<float, 5> kernel_backward(output_depth, input_depth, patch_planes,
patch_rows, patch_cols);
Tensor<float, 5> output_backward(output_depth, output_planes, output_rows,
output_cols, num_batches);
output_backward = output_backward.constant(11.0f) + output_backward.random();
input = input.constant(2.0f) + input.random();
kernel_backward.setRandom();
kernel_backward = CuboidConvolutionBackwardKernel(
input, output_backward, patch_planes, patch_rows, patch_cols,
stride_planes, stride_rows, stride_cols);
EXPECT_EQ(kernel_backward.dimension(0), output_depth);
EXPECT_EQ(kernel_backward.dimension(1), input_depth);
EXPECT_EQ(kernel_backward.dimension(2), patch_planes);
EXPECT_EQ(kernel_backward.dimension(3), patch_rows);
EXPECT_EQ(kernel_backward.dimension(4), patch_cols);
for (int od = 0; od < output_depth; ++od) {
for (int id = 0; id < input_depth; ++id) {
for (int p = 0; p < patch_planes; ++p) {
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
float expected = 0.0f;
for (int b = 0; b < num_batches; ++b) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
int output_j = j - r;
int output_k = k - c;
int output_i = i - p;
if (output_i >= 0 &&
output_i / stride_planes < output_planes &&
output_j >= 0 && output_j / stride_rows < output_rows &&
output_k >= 0 && output_k / stride_cols < output_cols &&
output_i % stride_planes == 0 &&
output_j % stride_rows == 0 &&
output_k % stride_cols == 0) {
expected += input(id, i, j, k, b) *
output_backward(od, output_i / stride_planes,
output_j / stride_rows,
output_k / stride_cols, b);
}
}
}
}
}
EigenApprox(kernel_backward(od, id, p, r, c), expected);
}
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_strided_cuboid_convolution_backward_kernel_valid_row_major) {
const int num_batches = 13;
const int input_depth = 2;
const int input_planes = 8;
const int input_rows = 7;
const int input_cols = 9;
const int output_depth = 3;
const int patch_planes = 3;
const int patch_rows = 3;
const int patch_cols = 2;
const int stride_planes = 2;
const int stride_cols = 3;
const int stride_rows = 1;
const int output_rows = ceil_div(input_rows - patch_rows + 1, stride_rows);
const int output_cols = ceil_div(input_cols - patch_cols + 1, stride_cols);
const int output_planes =
ceil_div(input_planes - patch_planes + 1, stride_planes);
Tensor<float, 5, RowMajor> input(num_batches, input_cols, input_rows,
input_planes, input_depth);
Tensor<float, 5, RowMajor> kernel_backward(
patch_cols, patch_rows, patch_planes, input_depth, output_depth);
Tensor<float, 5, RowMajor> output_backward(
num_batches, output_cols, output_rows, output_planes, output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
input = input.constant(2.0f) + input.random();
kernel_backward.setRandom();
kernel_backward = CuboidConvolutionBackwardKernel(
input, output_backward, patch_planes, patch_rows, patch_cols,
stride_planes, stride_rows, stride_cols);
EXPECT_EQ(kernel_backward.dimension(4), output_depth);
EXPECT_EQ(kernel_backward.dimension(3), input_depth);
EXPECT_EQ(kernel_backward.dimension(2), patch_planes);
EXPECT_EQ(kernel_backward.dimension(1), patch_rows);
EXPECT_EQ(kernel_backward.dimension(0), patch_cols);
for (int od = 0; od < output_depth; ++od) {
for (int id = 0; id < input_depth; ++id) {
for (int p = 0; p < patch_planes; ++p) {
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
float expected = 0.0f;
for (int b = 0; b < num_batches; ++b) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
int output_j = j - r;
int output_k = k - c;
int output_i = i - p;
if (output_i >= 0 &&
output_i / stride_planes < output_planes &&
output_j >= 0 && output_j / stride_rows < output_rows &&
output_k >= 0 && output_k / stride_cols < output_cols &&
output_i % stride_planes == 0 &&
output_j % stride_rows == 0 &&
output_k % stride_cols == 0) {
expected += input(b, k, j, i, id) *
output_backward(b, output_k / stride_cols,
output_j / stride_rows,
output_i / stride_planes, od);
}
}
}
}
}
EigenApprox(kernel_backward(c, r, p, id, od), expected);
}
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_strided_cuboid_convolution_backward_input_valid) {
const int num_batches = 13;
const int input_depth = 2;
const int input_planes = 14;
const int input_rows = 13;
const int input_cols = 15;
const int patch_rows = 3;
const int patch_cols = 2;
const int patch_planes = 4;
const int stride_rows = 3;
const int stride_cols = 2;
const int stride_planes = 3;
const int output_rows = ceil_div(input_rows - patch_rows + 1, stride_rows);
const int output_cols = ceil_div(input_cols - patch_cols + 1, stride_cols);
const int output_planes =
ceil_div(input_planes - patch_planes + 1, stride_planes);
const int output_depth = 5;
Tensor<float, 5> input_backward(input_depth, input_planes, input_rows,
input_cols, num_batches);
Tensor<float, 5> kernel(output_depth, input_depth, patch_planes, patch_rows,
patch_cols);
Tensor<float, 5> output_backward(output_depth, output_planes, output_rows,
output_cols, num_batches);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = CuboidConvolutionBackwardInput(
kernel, output_backward, input_planes, input_rows, input_cols,
stride_planes, stride_rows, stride_cols);
EXPECT_EQ(input_backward.dimension(4), num_batches);
EXPECT_EQ(input_backward.dimension(3), input_cols);
EXPECT_EQ(input_backward.dimension(2), input_rows);
EXPECT_EQ(input_backward.dimension(1), input_planes);
EXPECT_EQ(input_backward.dimension(0), input_depth);
for (int b = 0; b < num_batches; ++b) {
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int p = 0; p < patch_planes; ++p) {
for (int od = 0; od < output_depth; ++od) {
int output_j = j - r;
int output_k = k - c;
int output_i = i - p;
if (output_i >= 0 &&
output_i / stride_planes < output_planes &&
output_j >= 0 && output_j / stride_rows < output_rows &&
output_k >= 0 && output_k / stride_cols < output_cols &&
output_i % stride_planes == 0 &&
output_j % stride_rows == 0 &&
output_k % stride_cols == 0) {
expected += output_backward(od, output_i / stride_planes,
output_j / stride_rows,
output_k / stride_cols, b) *
kernel(od, id, p, r, c);
}
}
}
}
}
EigenApprox(input_backward(id, i, j, k, b), expected);
}
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest,
test_batched_strided_cuboid_convolution_backward_input_valid_row_major) {
const int num_batches = 13;
const int input_depth = 2;
const int input_planes = 14;
const int input_rows = 13;
const int input_cols = 15;
const int patch_rows = 3;
const int patch_cols = 2;
const int patch_planes = 4;
const int stride_rows = 3;
const int stride_cols = 2;
const int stride_planes = 3;
const int output_rows = ceil_div(input_rows - patch_rows + 1, stride_rows);
const int output_cols = ceil_div(input_cols - patch_cols + 1, stride_cols);
const int output_planes =
ceil_div(input_planes - patch_planes + 1, stride_planes);
const int output_depth = 5;
Tensor<float, 5, RowMajor> input_backward(num_batches, input_cols, input_rows,
input_planes, input_depth);
Tensor<float, 5, RowMajor> kernel(patch_cols, patch_rows, patch_planes,
input_depth, output_depth);
Tensor<float, 5, RowMajor> output_backward(
num_batches, output_cols, output_rows, output_planes, output_depth);
output_backward = output_backward.constant(11.0f) + output_backward.random();
kernel = kernel.constant(2.0f) + kernel.random();
input_backward.setRandom();
input_backward = CuboidConvolutionBackwardInput(
kernel, output_backward, input_planes, input_rows, input_cols,
stride_planes, stride_rows, stride_cols);
EXPECT_EQ(input_backward.dimension(0), num_batches);
EXPECT_EQ(input_backward.dimension(1), input_cols);
EXPECT_EQ(input_backward.dimension(2), input_rows);
EXPECT_EQ(input_backward.dimension(3), input_planes);
EXPECT_EQ(input_backward.dimension(4), input_depth);
for (int b = 0; b < num_batches; ++b) {
for (int id = 0; id < input_depth; ++id) {
for (int i = 0; i < input_planes; ++i) {
for (int j = 0; j < input_rows; ++j) {
for (int k = 0; k < input_cols; ++k) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int p = 0; p < patch_planes; ++p) {
for (int od = 0; od < output_depth; ++od) {
int output_j = j - r;
int output_k = k - c;
int output_i = i - p;
if (output_i >= 0 &&
output_i / stride_planes < output_planes &&
output_j >= 0 && output_j / stride_rows < output_rows &&
output_k >= 0 && output_k / stride_cols < output_cols &&
output_i % stride_planes == 0 &&
output_j % stride_rows == 0 &&
output_k % stride_cols == 0) {
expected +=
output_backward(b, output_k / stride_cols,
output_j / stride_rows,
output_i / stride_planes, od) *
kernel(c, r, p, id, od);
}
}
}
}
}
EigenApprox(input_backward(b, k, j, i, id), expected);
}
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/eigen_backward_cuboid_convolutions.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/eigen_backward_cuboid_convolutions_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
58fd0fed-e6ea-4f5d-b2d5-9570483b8c99 | cpp | google/tensorstore | single_index_slice_op | tensorstore/index_space/internal/single_index_slice_op.cc | tensorstore/index_space/single_index_slice_op_test.cc | #include "tensorstore/index_space/internal/single_index_slice_op.h"
#include "absl/status/status.h"
#include "tensorstore/index_space/internal/transform_rep_impl.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
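// Describes how one original input dimension maps into the sliced transform:
// `new_input_dim` is the corresponding dimension in the reduced-rank input
// space, or -1 if the dimension is removed by the slice, in which case
// `offset` holds the slice index.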
struct InputDimensionSingletonSliceInfo {
DimensionIndex new_input_dim;
Index offset;
};
struct SingletonSlicingInfo {
explicit SingletonSlicingInfo(DimensionIndex original_input_rank,
DimensionIndex new_input_rank)
: original_input_rank(original_input_rank),
new_input_rank(new_input_rank) {
std::fill_n(&original_input_dimension_info[0], original_input_rank,
InputDimensionSingletonSliceInfo{0, 0});
}
DimensionIndex original_input_rank;
DimensionIndex new_input_rank;
InputDimensionSingletonSliceInfo original_input_dimension_info[kMaxRank];
};
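// Validates the requested slice indices against the input domain and computes
// the mapping from original input dimensions to the reduced-rank input space.
// On success, `dimensions_buffer` is cleared because the sliced dimensions no
// longer exist in the new transform.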
Result<SingletonSlicingInfo> GetSingletonSlicingInfo(
TransformRep* original, DimensionIndexBuffer* dimensions_buffer,
IndexVectorOrScalarView indices) {
const span<const DimensionIndex> dimensions(*dimensions_buffer);
const DimensionIndex num_dims = dimensions.size();
const DimensionIndex original_input_rank = original->input_rank;
const DimensionIndex new_input_rank = original_input_rank - num_dims;
TENSORSTORE_RETURN_IF_ERROR(CheckIndexVectorSize(indices, num_dims));
Result<SingletonSlicingInfo> result(tensorstore::in_place,
original_input_rank, new_input_rank);
const Index* indices_pointer =
indices.pointer ? indices.pointer : &indices.size_or_scalar;
const Index indices_stride = indices.pointer ? 1 : 0;
std::string slice_error;
for (DimensionIndex i = 0; i < num_dims; ++i) {
const DimensionIndex original_input_dim = dimensions[i];
const Index index = indices_pointer[i * indices_stride];
const auto domain = original->input_dimension(original_input_dim)
.optionally_implicit_domain();
if (!Contains(domain.effective_interval(), index)) {
tensorstore::StrAppend(&slice_error, (slice_error.empty() ? "" : ", "),
"in input dimension ", original_input_dim,
" index ", index, " is outside valid domain ",
domain);
}
result->original_input_dimension_info[original_input_dim] =
InputDimensionSingletonSliceInfo{-1, index};
}
if (!slice_error.empty()) {
result = absl::OutOfRangeError(
tensorstore::StrCat("Slice mismatch: ", slice_error));
return result;
}
for (DimensionIndex original_input_dim = 0, new_input_dim = 0;
original_input_dim < original_input_rank; ++original_input_dim) {
auto& new_dim =
result->original_input_dimension_info[original_input_dim].new_input_dim;
if (new_dim == -1) continue;
new_dim = new_input_dim;
++new_input_dim;
}
dimensions_buffer->clear();
return result;
}
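// Copies `original_transform` into `new_transform`, dropping the sliced input
// dimensions and rewriting each output index map: maps that depend only on
// sliced dimensions become constant maps (folding the slice index into the
// offset), and index-array maps are re-indexed over the remaining dimensions.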
absl::Status PerformSingleIndexSlice(TransformRep* original_transform,
TransformRep* new_transform,
const SingletonSlicingInfo& info,
bool domain_only) {
const DimensionIndex original_input_rank = original_transform->input_rank;
const DimensionIndex new_input_rank = info.new_input_rank;
span<const InputDimensionSingletonSliceInfo> original_input_dimension_info =
info.original_input_dimension_info;
bool domain_is_explicitly_empty = false;
for (DimensionIndex original_input_dim = 0, new_input_dim = 0;
original_input_dim < original_input_rank; ++original_input_dim) {
if (original_input_dimension_info[original_input_dim].new_input_dim < 0)
continue;
const InputDimensionRef new_dim_ref =
new_transform->input_dimension(new_input_dim);
new_dim_ref = original_transform->input_dimension(original_input_dim);
if (new_dim_ref.domain().empty() && !new_dim_ref.implicit_lower_bound() &&
!new_dim_ref.implicit_upper_bound()) {
domain_is_explicitly_empty = true;
}
++new_input_dim;
}
const DimensionIndex output_rank =
domain_only ? 0 : original_transform->output_rank;
span<const OutputIndexMap> original_maps =
original_transform->output_index_maps().first(output_rank);
span<OutputIndexMap> new_maps =
new_transform->output_index_maps().first(output_rank);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const OutputIndexMap& original_map = original_maps[output_dim];
OutputIndexMap& new_map = new_maps[output_dim];
switch (original_map.method()) {
case OutputIndexMethod::constant: {
new_map.offset() = original_map.offset();
new_map.SetConstant();
new_map.stride() = 0;
break;
}
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex original_input_dim =
original_map.input_dimension();
assert(original_input_dim >= 0 &&
original_input_dim < original_input_rank);
const auto slice_info =
original_input_dimension_info[original_input_dim];
const Index output_stride = original_map.stride();
const Index output_offset = original_map.offset();
if (slice_info.new_input_dim == -1) {
Index new_offset;
if (internal::MulOverflow(slice_info.offset, output_stride,
&new_offset) ||
internal::AddOverflow(new_offset, output_offset,
&new_map.offset())) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Integer overflow computing offset for output dimension ",
output_dim, "."));
}
new_map.SetConstant();
new_map.stride() = 0;
} else {
new_map.SetSingleInputDimension(slice_info.new_input_dim);
new_map.stride() = output_stride;
new_map.offset() = output_offset;
}
break;
}
case OutputIndexMethod::array: {
if (domain_is_explicitly_empty) {
new_map.SetConstant();
new_map.offset() = 0;
new_map.stride() = 0;
break;
}
const IndexArrayData& original_index_array_data =
original_map.index_array_data();
IndexArrayData& new_index_array_data =
new_map.SetArrayIndexing(new_input_rank);
new_index_array_data.index_range =
original_index_array_data.index_range;
Index array_byte_offset = 0;
bool has_non_zero_byte_strides = false;
for (DimensionIndex original_input_dim = 0;
original_input_dim < original_input_rank; ++original_input_dim) {
const auto slice_info =
original_input_dimension_info[original_input_dim];
const Index byte_stride =
original_index_array_data.byte_strides[original_input_dim];
if (slice_info.new_input_dim == -1) {
array_byte_offset = internal::wrap_on_overflow::Add(
array_byte_offset, internal::wrap_on_overflow::Multiply(
byte_stride, slice_info.offset));
} else {
new_index_array_data.byte_strides[slice_info.new_input_dim] =
byte_stride;
if (byte_stride != 0) has_non_zero_byte_strides = true;
}
}
Index output_stride = original_map.stride();
Index output_offset = original_map.offset();
if (has_non_zero_byte_strides) {
new_index_array_data.element_pointer = AddByteOffset(
original_index_array_data.element_pointer, array_byte_offset);
} else {
TENSORSTORE_RETURN_IF_ERROR(ReplaceZeroRankIndexArrayIndexMap(
original_index_array_data.element_pointer
.byte_strided_pointer()[array_byte_offset],
new_index_array_data.index_range, &output_offset,
&output_stride));
new_map.SetConstant();
}
new_map.stride() = output_stride;
new_map.offset() = output_offset;
break;
}
}
}
new_transform->input_rank = new_input_rank;
new_transform->output_rank = output_rank;
NormalizeImplicitBounds(*new_transform);
internal_index_space::DebugCheckInvariants(new_transform);
return absl::OkStatus();
}
}
Result<IndexTransform<>> ApplySingleIndexSlice(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
IndexVectorOrScalarView indices,
bool domain_only) {
TransformRep* rep = TransformAccess::rep(transform);
auto slicing_info = GetSingletonSlicingInfo(rep, dimensions, indices);
if (!slicing_info) return slicing_info.status();
auto new_rep = NewOrMutableRep(rep, slicing_info->new_input_rank,
rep->output_rank, domain_only);
TENSORSTORE_RETURN_IF_ERROR(
PerformSingleIndexSlice(rep, new_rep.get(), *slicing_info, domain_only));
return TransformAccess::Make<IndexTransform<>>(new_rep);
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::AllDims;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::MakeArray;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::EquivalentIndices;
using ::tensorstore::internal_index_space::TestDimExpression;
TEST(SingleIndexSliceTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<1, 3>()
.input_origin({2})
.input_shape({4})
.input_labels({"y"})
.output_constant(0, 2)
.output_single_input_dimension(1, 0)
.output_constant(2, 4)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{2, 3, 4}, {3}},
};
TestDimExpression(original_transform,
Dims(0, 2).IndexSlice({2, 4}),
{},
expected_new_transform,
expected_new_transform,
equivalent_indices);
TestDimExpression(original_transform,
Dims("x", "z").IndexSlice({2, 4}),
{},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(SingleIndexSliceTest, ImplicitLowerBound) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.implicit_lower_bounds({1, 1, 0})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<1, 3>()
.input_origin({2})
.implicit_lower_bounds({1})
.input_shape({4})
.input_labels({"y"})
.output_constant(0, -7)
.output_single_input_dimension(1, 0)
.output_constant(2, 4)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{-7, 3, 4}, {3}},
};
TestDimExpression(original_transform,
Dims(0, 2).IndexSlice({-7, 4}),
{},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(SingleIndexSliceTest, DimSubsetUniformIndexArrayRetained) {
TestDimExpression(
IndexTransformBuilder<3, 4>()
.input_origin({1, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7})
.output_single_input_dimension(0, 1, 4, 0)
.output_single_input_dimension(1, 2, 3, 2)
.output_constant(2, 3)
.output_index_array(
3, 4, 1,
MakeArray<Index>({{{5}, {6}, {7}, {8}, {9}},
{{15}, {16}, {17}, {18}, {19}},
{{25}, {26}, {27}, {28}, {29}},
{{35}, {36}, {37}, {38}, {39}}}))
.Finalize()
.value(),
Dims(1, 2).IndexSlice(3),
{},
IndexTransformBuilder<1, 3>()
.input_origin({1})
.input_shape({4})
.output_single_input_dimension(0, 0)
.output_constant(1, 3)
.output_constant(2, 3)
.Finalize()
.value(),
IndexTransformBuilder<1, 4>()
.input_origin({1})
.input_shape({4})
.output_single_input_dimension(0, 1, 4, 0)
.output_constant(1, 2 + 3 * 3)
.output_constant(2, 3)
.output_index_array(3, 4, 1,
MakeArray<Index>({6, 16, 26, 36}))
.Finalize()
.value(),
{{{4, 3, 3}, {4}}});
}
TEST(SingleIndexSliceTest, DimSubsetUniformIndexArrayEliminated) {
TestDimExpression(
IndexTransformBuilder<3, 4>()
.input_origin({1, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7})
.output_single_input_dimension(0, 1, 4, 0)
.output_single_input_dimension(1, 2, 3, 2)
.output_constant(2, 3)
.output_index_array(
3, 4, 1,
MakeArray<Index>({{{5}, {6}, {7}, {8}, {9}}}))
.Finalize()
.value(),
Dims(1, 2).IndexSlice(3),
{},
IndexTransformBuilder<1, 3>()
.input_origin({1})
.input_shape({4})
.output_single_input_dimension(0, 0)
.output_constant(1, 3)
.output_constant(2, 3)
.Finalize()
.value(),
IndexTransformBuilder<1, 4>()
.input_origin({1})
.input_shape({4})
.output_single_input_dimension(0, 1, 4, 0)
.output_constant(1, 2 + 3 * 3)
.output_constant(2, 3)
.output_constant(3, 4 + 1 * 6)
.Finalize()
.value(),
{{{4, 3, 3}, {4}}});
}
TEST(SingleIndexSliceTest, DimSubsetNonUniform) {
TestDimExpression(
IndexTransformBuilder<3, 4>()
.input_origin({1, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7})
.output_single_input_dimension(0, 1, 4, 0)
.output_single_input_dimension(1, 2, 3, 2)
.output_constant(2, 3)
.output_index_array(
3, 4, 1,
MakeArray<Index>({{{5}, {6}, {7}, {8}, {9}},
{{15}, {16}, {17}, {18}, {19}},
{{25}, {26}, {27}, {28}, {29}},
{{35}, {36}, {37}, {38}, {39}}}))
.Finalize()
.value(),
Dims(1, 2).IndexSlice({3, 4}),
{},
IndexTransformBuilder<1, 3>()
.input_origin({1})
.input_shape({4})
.output_single_input_dimension(0, 0)
.output_constant(1, 3)
.output_constant(2, 4)
.Finalize()
.value(),
IndexTransformBuilder<1, 4>()
.input_origin({1})
.input_shape({4})
.output_single_input_dimension(0, 1, 4, 0)
.output_constant(1, 2 + 4 * 3)
.output_constant(2, 3)
.output_index_array(3, 4, 1,
MakeArray<Index>({6, 16, 26, 36}))
.Finalize()
.value(),
{{{4, 3, 4}, {4}}});
}
TEST(SingleIndexSliceTest, DimSubsetNonUniformLabeled) {
TestDimExpression(
IndexTransformBuilder<3, 4>()
.input_origin({1, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, 1, 4, 0)
.output_single_input_dimension(1, 2, 3, 2)
.output_constant(2, 3)
.output_index_array(
3, 4, 1,
MakeArray<Index>({{{5}, {6}, {7}, {8}, {9}},
{{15}, {16}, {17}, {18}, {19}},
{{25}, {26}, {27}, {28}, {29}},
{{35}, {36}, {37}, {38}, {39}}}))
.Finalize()
.value(),
Dims(1, 2).IndexSlice({3, 4}),
{},
IndexTransformBuilder<1, 3>()
.input_origin({1})
.input_shape({4})
.input_labels({"x"})
.output_single_input_dimension(0, 0)
.output_constant(1, 3)
.output_constant(2, 4)
.Finalize()
.value(),
IndexTransformBuilder<1, 4>()
.input_origin({1})
.input_shape({4})
.input_labels({"x"})
.output_single_input_dimension(0, 1, 4, 0)
.output_constant(1, 2 + 4 * 3)
.output_constant(2, 3)
.output_index_array(3, 4, 1,
MakeArray<Index>({6, 16, 26, 36}))
.Finalize()
.value(),
{{{4, 3, 4}, {4}}});
}
TEST(SingleIndexSliceTest, EmptyDomain) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({0, 3})
.input_labels({"x", "y"})
.output_single_input_dimension(0, 2, 7, 0)
.output_index_array(1, 4, 3,
MakeArray<Index>({{1, 2, 3}}))
.Finalize()
.value(),
Dims(1).IndexSlice({3}),
{},
IndexTransformBuilder<1, 2>()
.input_origin({1})
.input_shape({0})
.input_labels({"x"})
.output_single_input_dimension(0, 0)
.output_constant(1, 3)
.Finalize()
.value(),
IndexTransformBuilder<1, 2>()
.input_origin({1})
.input_shape({0})
.input_labels({"x"})
.output_single_input_dimension(0, 2, 7, 0)
.output_constant(1, 0)
.Finalize()
.value(),
{});
}
TEST(ErrorHandlingTest, DimensionSelectionRankMismatch) {
TestDimExpressionError(IndexTransformBuilder<1, 1>().Finalize().value(),
AllDims().IndexSlice(span<const Index>({1, 2})),
absl::StatusCode::kInvalidArgument,
"Number of dimensions .* does not match number of "
"indices .*");
}
TEST(ErrorHandlingTest, OutOfBounds) {
TestDimExpressionError(IndexTransformBuilder<1, 1>()
.input_origin({-10})
.input_shape({15})
.Finalize()
.value(),
AllDims().IndexSlice({5}),
absl::StatusCode::kOutOfRange,
"Slice mismatch: .* is outside valid domain .*");
}
TEST(ErrorHandlingTest, OutOfBoundsInfinity) {
TestDimExpressionError(IndexTransformBuilder<1, 1>()
.input_origin({-kInfIndex})
.input_shape({15})
.Finalize()
.value(),
AllDims().IndexSlice({-kInfIndex}),
absl::StatusCode::kOutOfRange,
"Slice mismatch: .* is outside valid domain .*");
}
TEST(ErrorHandlingTest, SingleInputDimensionMapIntegerOverflow) {
TestDimExpressionErrorTransformOnly(
IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({10})
.output_single_input_dimension(0, std::numeric_limits<Index>::max(),
1, 0)
.Finalize()
.value(),
AllDims().IndexSlice({1}), absl::StatusCode::kInvalidArgument,
"Integer overflow computing offset for output dimension.*",
IndexDomainBuilder<0>().Finalize().value());
}
TEST(ErrorHandlingTest, IndexArrayMapIntegerOverflow) {
TestDimExpressionErrorTransformOnly(
IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({3})
.output_index_array(0, std::numeric_limits<Index>::max(), 1,
MakeArray<Index>({0, 1, 2}))
.Finalize()
.value(),
AllDims().IndexSlice({1}), absl::StatusCode::kInvalidArgument,
"Integer overflow computing offset for output dimension.*",
IndexDomainBuilder<0>().Finalize().value());
}
TEST(ErrorHandlingTest, IndexArrayMapOutOfBounds) {
TestDimExpressionErrorTransformOnly(
IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({0, 1, 2}),
IndexInterval::Closed(-5, -3))
.Finalize()
.value(),
AllDims().IndexSlice({1}), absl::StatusCode::kOutOfRange,
"Index .* is outside valid range .*",
IndexDomainBuilder<0>().Finalize().value());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/single_index_slice_op.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/single_index_slice_op_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
c3c28df6-a38d-4086-b3c6-3e6008037f50 | cpp | google/arolla | struct_io | arolla/io/struct_io.cc | arolla/io/struct_io_test.cc | #include "arolla/io/struct_io.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_slot.h"
namespace arolla::struct_io_impl {
std::vector<std::string> SuggestAvailableNames(
const absl::flat_hash_map<std::string, TypedSlot>& slots) {
std::vector<std::string> names;
names.reserve(slots.size());
for (const auto& [name, _] : slots) {
names.emplace_back(name);
}
return names;
}
absl::Status ValidateStructSlots(
const absl::flat_hash_map<std::string, TypedSlot>& slots,
size_t struct_size) {
for (const auto& [name, slot] : slots) {
if (slot.byte_offset() + slot.GetType()->type_layout().AllocSize() >
struct_size) {
return absl::InvalidArgumentError(
absl::StrCat("slot '", name, "' is not within the struct"));
}
}
return absl::OkStatus();
}
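// Buckets the requested fields by how they can be copied: bool, trivially
// copyable 4-byte, and trivially copyable 8-byte values are copied with
// memcpy, while all other types fall back to QType::UnsafeCopy. Offset pairs
// are sorted so the copy loops walk memory in increasing order.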
StructIO::StructIO(
const absl::flat_hash_map<std::string, TypedSlot>& struct_slots,
const absl::flat_hash_map<std::string, TypedSlot>& frame_slots) {
QTypePtr b = GetQType<bool>();
std::vector<QTypePtr> types32{GetQType<float>(), GetQType<int32_t>()};
std::vector<QTypePtr> types64{GetQType<double>(), GetQType<int64_t>(),
GetQType<uint64_t>(), GetOptionalQType<float>(),
GetOptionalQType<int32_t>()};
static_assert(sizeof(OptionalValue<float>) == 8);
static_assert(sizeof(OptionalValue<int32_t>) == 8);
static_assert(std::is_trivially_copyable_v<OptionalValue<float>>);
static_assert(std::is_trivially_copyable_v<OptionalValue<int32_t>>);
for (const auto& [name, frame_slot] : frame_slots) {
QTypePtr t = frame_slot.GetType();
size_t struct_offset = struct_slots.at(name).byte_offset();
size_t frame_offset = frame_slot.byte_offset();
if (t == b) {
offsets_bool_.emplace_back(struct_offset, frame_offset);
} else if (absl::c_find(types32, t) != types32.end()) {
DCHECK_EQ(t->type_layout().AllocSize(), 4);
offsets_32bits_.emplace_back(struct_offset, frame_offset);
} else if (absl::c_find(types64, t) != types64.end()) {
DCHECK_EQ(t->type_layout().AllocSize(), 8);
offsets_64bits_.emplace_back(struct_offset, frame_offset);
} else {
offsets_other_[t].emplace_back(struct_offset, frame_offset);
}
}
std::sort(offsets_bool_.begin(), offsets_bool_.end());
std::sort(offsets_32bits_.begin(), offsets_32bits_.end());
std::sort(offsets_64bits_.begin(), offsets_64bits_.end());
for (auto& [_, v] : offsets_other_) {
std::sort(v.begin(), v.end());
}
}
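// Copies every registered field from the C++ struct into the evaluation
// frame, using memcpy for the fixed-width buckets and QType::UnsafeCopy for
// everything else. CopyFrameToStruct below performs the reverse direction.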
void StructIO::CopyStructToFrame(const void* struct_ptr, FramePtr frame) const {
const char* src_base = reinterpret_cast<const char*>(struct_ptr);
for (const auto& [src, dst] : offsets_bool_) {
std::memcpy(frame.GetRawPointer(dst), src_base + src, sizeof(bool));
}
for (const auto& [src, dst] : offsets_32bits_) {
std::memcpy(frame.GetRawPointer(dst), src_base + src, 4);
}
for (const auto& [src, dst] : offsets_64bits_) {
std::memcpy(frame.GetRawPointer(dst), src_base + src, 8);
}
for (const auto& [t, offsets] : offsets_other_) {
for (const auto& [src, dst] : offsets) {
t->UnsafeCopy(src_base + src, frame.GetRawPointer(dst));
}
}
}
void StructIO::CopyFrameToStruct(ConstFramePtr frame, void* struct_ptr) const {
char* dst_base = reinterpret_cast<char*>(struct_ptr);
for (const auto& [dst, src] : offsets_bool_) {
std::memcpy(dst_base + dst, frame.GetRawPointer(src), sizeof(bool));
}
for (const auto& [dst, src] : offsets_32bits_) {
std::memcpy(dst_base + dst, frame.GetRawPointer(src), 4);
}
for (const auto& [dst, src] : offsets_64bits_) {
std::memcpy(dst_base + dst, frame.GetRawPointer(src), 8);
}
for (const auto& [t, offsets] : offsets_other_) {
for (const auto& [dst, src] : offsets) {
t->UnsafeCopy(frame.GetRawPointer(src), dst_base + dst);
}
}
}
} | #include "arolla/io/struct_io.h"
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/bytes.h"
namespace arolla {
namespace {
using ::absl_testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::UnorderedElementsAreArray;
struct TestStruct {
int32_t a = 1;
bool b = true;
float c = 3.0f;
int32_t d = 4;
int32_t j = 10;
DenseArray<int32_t> e;
Bytes f;
double g = 7.0;
int64_t h = 8;
OptionalValue<int32_t> i = 9;
OptionalValue<float> k = 11.0f;
};
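// Builds a {name, TypedSlot} pair for a struct field from its declared type
// and byte offset within the struct.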
#define STRUCT_SLOT(STRUCT, FIELD) \
{ \
#FIELD, TypedSlot::UnsafeFromOffset(GetQType<typeof(STRUCT::FIELD)>(), \
offsetof(STRUCT, FIELD)) \
}
absl::flat_hash_map<std::string, TypedSlot> GetStructSlots() {
return absl::flat_hash_map<std::string, TypedSlot>{
STRUCT_SLOT(TestStruct, a),
STRUCT_SLOT(TestStruct, b),
STRUCT_SLOT(TestStruct, c),
STRUCT_SLOT(TestStruct, d),
STRUCT_SLOT(TestStruct, e),
STRUCT_SLOT(TestStruct, f),
STRUCT_SLOT(TestStruct, g),
STRUCT_SLOT(TestStruct, h),
STRUCT_SLOT(TestStruct, i),
STRUCT_SLOT(TestStruct, j),
STRUCT_SLOT(TestStruct, k),
};
}
TEST(StructIO, GetNamesAndTypes) {
ASSERT_OK_AND_ASSIGN(auto input_loader,
StructInputLoader<TestStruct>::Create(GetStructSlots()));
ASSERT_OK_AND_ASSIGN(
auto slot_listener,
StructSlotListener<TestStruct>::Create(GetStructSlots()));
std::vector<std::string> expected_names{"a", "b", "c", "d", "e", "f",
"g", "h", "i", "j", "k"};
EXPECT_THAT(input_loader->SuggestAvailableNames(),
UnorderedElementsAreArray(expected_names));
EXPECT_THAT(slot_listener->SuggestAvailableNames(),
UnorderedElementsAreArray(expected_names));
EXPECT_EQ(input_loader->GetQTypeOf("e"), GetDenseArrayQType<int32_t>());
EXPECT_EQ(slot_listener->GetQTypeOf("g"), GetQType<double>());
}
TEST(StructIO, BasicTest) {
ASSERT_OK_AND_ASSIGN(auto input_loader,
StructInputLoader<TestStruct>::Create(GetStructSlots()));
ASSERT_OK_AND_ASSIGN(
auto slot_listener,
StructSlotListener<TestStruct>::Create(GetStructSlots()));
FrameLayout::Builder bldr;
auto a_slot = bldr.AddSlot<int32_t>();
auto d_slot = bldr.AddSlot<int32_t>();
auto j_slot = bldr.AddSlot<int32_t>();
auto k_slot = bldr.AddSlot<OptionalValue<float>>();
auto b_slot = bldr.AddSlot<bool>();
auto c_slot = bldr.AddSlot<float>();
auto i_slot = bldr.AddSlot<OptionalValue<int32_t>>();
FrameLayout layout = std::move(bldr).Build();
absl::flat_hash_map<std::string, TypedSlot> frame_slots{
{"a", TypedSlot::FromSlot(a_slot)},
{"d", TypedSlot::FromSlot(d_slot)},
{"j", TypedSlot::FromSlot(j_slot)},
{"b", TypedSlot::FromSlot(b_slot)},
{"c", TypedSlot::FromSlot(c_slot)},
{"i", TypedSlot::FromSlot(i_slot)},
{"k", TypedSlot::FromSlot(k_slot)},
};
ASSERT_OK_AND_ASSIGN(auto bound_loader, input_loader->Bind(frame_slots));
ASSERT_OK_AND_ASSIGN(auto bound_listener, slot_listener->Bind(frame_slots));
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
TestStruct ts;
ASSERT_OK(bound_loader(ts, frame));
EXPECT_EQ(frame.Get(a_slot), 1);
EXPECT_EQ(frame.Get(b_slot), true);
EXPECT_EQ(frame.Get(c_slot), 3.0f);
EXPECT_EQ(frame.Get(d_slot), 4);
EXPECT_EQ(frame.Get(i_slot), 9);
EXPECT_EQ(frame.Get(j_slot), 10);
EXPECT_EQ(frame.Get(k_slot), 11.0f);
frame.Set(a_slot, 100);
frame.Set(b_slot, false);
frame.Set(c_slot, 3.14f);
frame.Set(d_slot, 57);
frame.Set(i_slot, std::nullopt);
frame.Set(j_slot, 19);
frame.Set(k_slot, 0.5f);
ASSERT_OK(bound_listener(frame, &ts));
EXPECT_EQ(ts.a, 100);
EXPECT_EQ(ts.b, false);
EXPECT_EQ(ts.c, 3.14f);
EXPECT_EQ(ts.d, 57);
EXPECT_EQ(ts.i, std::nullopt);
EXPECT_EQ(ts.j, 19);
EXPECT_EQ(ts.k, 0.5f);
}
TEST(StructIO, ComplicatedQType) {
ASSERT_OK_AND_ASSIGN(auto input_loader,
StructInputLoader<TestStruct>::Create(GetStructSlots()));
ASSERT_OK_AND_ASSIGN(
auto slot_listener,
StructSlotListener<TestStruct>::Create(GetStructSlots()));
FrameLayout::Builder bldr;
auto f_slot = bldr.AddSlot<Bytes>();
auto e_slot = bldr.AddSlot<DenseArray<int32_t>>();
FrameLayout layout = std::move(bldr).Build();
absl::flat_hash_map<std::string, TypedSlot> frame_slots{
{"e", TypedSlot::FromSlot(e_slot)},
{"f", TypedSlot::FromSlot(f_slot)},
};
ASSERT_OK_AND_ASSIGN(auto bound_loader, input_loader->Bind(frame_slots));
ASSERT_OK_AND_ASSIGN(auto bound_listener, slot_listener->Bind(frame_slots));
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
TestStruct ts;
ts.e = CreateDenseArray<int32_t>({1, 2, 3});
ts.f = Bytes("abacaba");
ASSERT_OK(bound_loader(ts, frame));
ts.e = DenseArray<int32_t>();
ts.f = Bytes();
EXPECT_THAT(frame.Get(e_slot), ElementsAre(1, 2, 3));
EXPECT_EQ(frame.Get(f_slot), Bytes("abacaba"));
ASSERT_OK(bound_listener(frame, &ts));
EXPECT_THAT(ts.e, ElementsAre(1, 2, 3));
EXPECT_EQ(ts.f, Bytes("abacaba"));
}
TEST(StructIO, Errors) {
absl::flat_hash_map<std::string, TypedSlot> struct_slots1{
{"a", TypedSlot::UnsafeFromOffset(GetQType<int32_t>(), 0)},
{"b", TypedSlot::UnsafeFromOffset(GetQType<int32_t>(), 5)},
{"c", TypedSlot::UnsafeFromOffset(GetQType<int32_t>(), 100500)},
};
EXPECT_THAT(StructInputLoader<TestStruct>::Create(struct_slots1),
StatusIs(absl::StatusCode::kInvalidArgument,
"slot 'c' is not within the struct"));
absl::flat_hash_map<std::string, TypedSlot> struct_slots2{
{"a", TypedSlot::UnsafeFromOffset(GetQType<int32_t>(), 4)},
{"b", TypedSlot::UnsafeFromOffset(GetQType<int32_t>(),
sizeof(TestStruct) - 3)},
{"c", TypedSlot::UnsafeFromOffset(GetQType<int32_t>(), 0)},
};
EXPECT_THAT(StructSlotListener<TestStruct>::Create(struct_slots2),
StatusIs(absl::StatusCode::kInvalidArgument,
"slot 'b' is not within the struct"));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/struct_io.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/struct_io_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
b3ddb9f4-1b05-4c52-952e-978d27be77a3 | cpp | tensorflow/tensorflow | debug_data_dumper | tensorflow/core/util/debug_data_dumper.cc | tensorflow/core/util/debug_data_dumper_test.cc | #include "tensorflow/core/util/debug_data_dumper.h"
#include <cstdlib>
#include <optional>
#include <set>
#include <string>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
DebugDataDumper* DebugDataDumper::Global() {
static DebugDataDumper* global_instance_ = new DebugDataDumper();
return global_instance_;
}
DebugDataDumper::DebugDataDumper() { LoadEnvvars(); }
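// Reads the TF_DUMP_GRAPH_* environment variables; when TF_DUMP_GRAPH_GROUPS
// is unset, only the "main" group is dumped.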
void DebugDataDumper::LoadEnvvars() {
const char* dump_wrapped = getenv("TF_DUMP_GRAPH_WRAPPED");
dump_wrapped_ = static_cast<bool>(dump_wrapped);
const char* name_filter = getenv("TF_DUMP_GRAPH_NAME_FILTER");
name_filter_ =
name_filter ? std::optional<std::string>{name_filter} : std::nullopt;
const char* groups_filter = getenv("TF_DUMP_GRAPH_GROUPS");
groups_filter_ =
groups_filter ? std::set<std::string>(absl::StrSplit(groups_filter, ','))
: std::set<std::string>({kDebugGroupMain});
}
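// A graph is dumped only if TF_DUMP_GRAPH_NAME_FILTER is set and matches the
// name (either "*" or a substring of it), its group is enabled, and wrapped
// graphs ("__wrapped__" prefix) are allowed via TF_DUMP_GRAPH_WRAPPED.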
bool DebugDataDumper::ShouldDump(const std::string& name,
const std::string& group) const {
if (!dump_wrapped_ && absl::StartsWith(name, "__wrapped__")) return false;
if (name_filter_ == std::nullopt) {
VLOG(1) << "Skip dumping graph '" << name
<< "', because TF_DUMP_GRAPH_NAME_FILTER is not set";
return false;
}
if (!absl::EqualsIgnoreCase(*name_filter_, "*") &&
!absl::StrContains(name, *name_filter_)) {
VLOG(1) << "Skip dumping graph '" << name
<< "', because TF_DUMP_GRAPH_NAME_FILTER is not '*' and "
<< "it is not contained by the graph name";
return false;
}
if (groups_filter_.find(group) == groups_filter_.end() &&
groups_filter_.find("*") == groups_filter_.end())
return false;
return true;
}
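// Writes one CSV row per node that has a creation stack trace, with the
// frames of each trace joined by ';'.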
void DebugDataDumper::DumpOpCreationStackTraces(const std::string& name,
const std::string& group,
const std::string& tag,
const Graph* graph) {
if (!ShouldDump(name, group)) return;
std::string dump_filename = GetDumpFilename(name, group, tag);
DumpToFile(dump_filename, "", ".csv", "StackTrace",
[graph, &dump_filename](WritableFile* file) {
auto status = file->Append("node_id,node_name,stackframes\n");
if (!status.ok()) {
LOG(WARNING) << "error writing to file to " << dump_filename
<< ": " << status.message();
return status;
}
for (Node* node : graph->nodes()) {
auto stack_trace = node->GetStackTrace();
if (stack_trace == nullptr) continue;
int node_id = node->id();
const std::string& node_name = node->name();
std::vector<std::string> stackframes;
stackframes.reserve(stack_trace->ToFrames().size());
for (auto& frame : stack_trace->ToFrames()) {
stackframes.push_back(
absl::StrFormat("%s(%d): %s", frame.file_name,
frame.line_number, frame.function_name));
}
status = file->Append(
absl::StrFormat("%d,%s,%s\n", node_id, node_name,
absl::StrJoin(stackframes, ";")));
if (!status.ok()) {
LOG(WARNING) << "error writing to file to " << dump_filename
<< ": " << status.message();
return status;
}
}
return file->Close();
});
}
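// Dumps the graph (plus the reachable subset of `func_lib_def`, if provided)
// as a GraphDef text proto, unless the dump file name would exceed 255
// characters.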
void DebugDataDumper::DumpGraph(const std::string& name,
const std::string& group,
const std::string& tag, const Graph* graph,
const FunctionLibraryDefinition* func_lib_def,
bool bypass_filter) {
if (!ShouldDump(name, group) && !bypass_filter) return;
std::string dump_filename = GetDumpFilename(name, group, tag);
if (dump_filename.size() > 255) {
LOG(WARNING) << "Failed to dump graph " << dump_filename << " to "
<< ", because the file name is longer than 255";
return;
}
GraphDef graph_def;
graph->ToGraphDef(&graph_def);
if (func_lib_def) {
FunctionLibraryDefinition reachable_lib_def =
func_lib_def->ReachableDefinitions(graph_def);
*graph_def.mutable_library() = reachable_lib_def.ToProto();
}
DumpGraphDefToFile(dump_filename, graph_def);
}
std::string DebugDataDumper::GetDumpFilename(const std::string& name,
const std::string& group,
const std::string& tag) {
std::string dump_name = name.empty() ? "unknown_graph" : name;
return absl::StrFormat("%s.%04d.%s.%s", dump_name, GetNextDumpId(name), group,
tag);
}
} | #include "tensorflow/core/util/debug_data_dumper.h"
#include <string>
#include "absl/strings/str_format.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(DebugDataDumper, NoPrefixTest) {
EXPECT_EQ(false, DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupMain));
}
TEST(DebugDataDumper, NoNameFilterTest) {
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(false, DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupMain));
}
TEST(DebugDataDumper, ShouldDumpTest) {
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(true, DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupMain));
setenv("TF_DUMP_GRAPH_NAME_FILTER", "DumpGraph", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(true, DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupMain));
setenv("TF_DUMP_GRAPH_NAME_FILTER", "DoNotDumpGraph", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(false, DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupMain));
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(false,
DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupBridgePhase1Clustering));
setenv("TF_DUMP_GRAPH_GROUPS", "main,bridge_phase1_clustering", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(true,
DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupBridgePhase1Clustering));
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(false, DEBUG_DATA_DUMPER()->ShouldDump(
"__wrapped__DumpGraphToFileTest", kDebugGroupMain));
setenv("TF_DUMP_GRAPH_WRAPPED", "true", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(true, DEBUG_DATA_DUMPER()->ShouldDump(
"__wrapped__DumpGraphToFileTest", kDebugGroupMain));
}
TEST(DebugDataDumper, DumpFileBasenameTest) {
EXPECT_EQ("DumpFileBasenameTest1.0000.main.tag1",
DEBUG_DATA_DUMPER()->GetDumpFilename("DumpFileBasenameTest1",
kDebugGroupMain, "tag1"));
EXPECT_EQ("DumpFileBasenameTest1.0001.main.tag2",
DEBUG_DATA_DUMPER()->GetDumpFilename("DumpFileBasenameTest1",
kDebugGroupMain, "tag2"));
EXPECT_EQ("DumpFileBasenameTest2.0000.main.tag1",
DEBUG_DATA_DUMPER()->GetDumpFilename("DumpFileBasenameTest2",
kDebugGroupMain, "tag1"));
}
TEST(DebugDataDumper, DumpGraphToFileTest) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
DEBUG_DATA_DUMPER()->DumpGraph("DumpGraphToFileTest", kDebugGroupMain, "tag",
&graph, nullptr, false);
std::string dumpFilename =
io::JoinPath(dir, "DumpGraphToFileTest.0000.main.tag.pbtxt");
EXPECT_EQ(absl::OkStatus(), Env::Default()->FileExists(dumpFilename));
}
TEST(DebugDataDumper, DumpGraphLongFileNameCrashTest) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
std::string name = std::string(256, 'x');
DEBUG_DATA_DUMPER()->DumpGraph(name, kDebugGroupMain, "tag", &graph, nullptr,
false);
std::string dumpFilename = io::JoinPath(
dir, absl::StrFormat("%s.0000.main.tag.pbtxt", name.c_str()));
EXPECT_EQ(absl::StatusCode::kNotFound,
Env::Default()->FileExists(dumpFilename).code());
}
TEST(DebugDataDumper, DumpOpCreationStacktracesTest) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
setenv("TF_DUMP_OP_CREATION_STACKTRACES", "1", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
DEBUG_DATA_DUMPER()->DumpOpCreationStackTraces(
"DumpOpCreationStacktracesTest", kDebugGroupMain, "test", &graph);
std::string dumpFilename =
io::JoinPath(dir, "DumpOpCreationStacktracesTest.0000.main.test.csv");
EXPECT_EQ(absl::OkStatus(), Env::Default()->FileExists(dumpFilename));
}
TEST(DebugDataDumper, NoDumpOpCreationStacktracesTest) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
DEBUG_DATA_DUMPER()->DumpOpCreationStackTraces(
"DumpOpCreationStacktracesTest", kDebugGroupMain, "test", &graph);
std::string dumpFilename =
io::JoinPath(dir, "DumpOpCreationStacktracesTest.0000.main.test.json");
EXPECT_EQ(absl::StatusCode::kNotFound,
Env::Default()->FileExists(dumpFilename).code());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/debug_data_dumper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/debug_data_dumper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e0018a36-efb1-402e-a04d-3986a28f728c | cpp | google/tensorstore | zarr3_sharding_indexed | tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed.cc | tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed_test.cc | #include "tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed.h"
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/batch.h"
#include "tensorstore/context.h"
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/cache_pool_resource.h"
#include "tensorstore/internal/cache/kvs_backed_cache.h"
#include "tensorstore/internal/cache_key/cache_key.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/estimate_heap_usage/estimate_heap_usage.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/dimension_indexed.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/kvstore/batch_util.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_modify_write.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/kvstore/transaction.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/key.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/shard_format.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/bit_vec.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/flow_sender_operation_state.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/garbage_collection/garbage_collection.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
#include "tensorstore/internal/cache_key/std_vector.h"
#include "tensorstore/internal/estimate_heap_usage/std_optional.h"
#include "tensorstore/internal/estimate_heap_usage/std_vector.h"
#include "tensorstore/serialization/std_vector.h"
#include "tensorstore/util/execution/result_sender.h"
#include "tensorstore/util/garbage_collection/std_vector.h"
namespace tensorstore {
namespace zarr3_sharding_indexed {
namespace {
using ::tensorstore::internal_kvstore::DeleteRangeEntry;
using ::tensorstore::internal_kvstore::kReadModifyWrite;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore::kvstore::ListReceiver;
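// Read-only view of the base kvstore that restricts every read to the byte
// range holding the shard index: a prefix or suffix of the shard file,
// depending on `index_location`.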
class ShardIndexKeyValueStore : public kvstore::Driver {
public:
explicit ShardIndexKeyValueStore(kvstore::DriverPtr base,
ShardIndexLocation index_location,
int64_t index_size_in_bytes)
: base_(std::move(base)),
index_location_(index_location),
index_size_in_bytes_(index_size_in_bytes) {}
Future<kvstore::ReadResult> Read(kvstore::Key key,
kvstore::ReadOptions options) override {
assert(options.byte_range == OptionalByteRangeRequest{});
switch (index_location_) {
case ShardIndexLocation::kStart:
options.byte_range =
OptionalByteRangeRequest::Range(0, index_size_in_bytes_);
break;
case ShardIndexLocation::kEnd:
options.byte_range =
OptionalByteRangeRequest::SuffixLength(index_size_in_bytes_);
break;
}
return MapFutureError(
InlineExecutor{},
[](const absl::Status& status) {
return internal::ConvertInvalidArgumentToFailedPrecondition(status);
},
base_->Read(std::move(key), std::move(options)));
}
std::string DescribeKey(std::string_view key) override {
return tensorstore::StrCat("shard index in ", base_->DescribeKey(key));
}
void GarbageCollectionVisit(
garbage_collection::GarbageCollectionVisitor& visitor) const final {
}
kvstore::Driver* base() { return base_.get(); }
private:
kvstore::DriverPtr base_;
ShardIndexLocation index_location_;
int64_t index_size_in_bytes_;
};
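// Read-through cache of the decoded shard index. Each entry reads the index
// bytes for one shard via ShardIndexKeyValueStore and stores the decoded
// ShardIndex as its read data.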
class ShardIndexCache
: public internal::KvsBackedCache<ShardIndexCache, internal::AsyncCache> {
using Base = internal::KvsBackedCache<ShardIndexCache, internal::AsyncCache>;
public:
using ReadData = ShardIndex;
class Entry : public Base::Entry {
public:
using OwningCache = ShardIndexCache;
size_t ComputeReadDataSizeInBytes(const void* read_data) override {
const auto& cache = GetOwningCache(*this);
return read_data
? cache.shard_index_params().num_entries * sizeof(uint64_t) * 2
: 0;
}
std::string GetKeyValueStoreKey() override {
return GetOwningCache(*this).base_kvstore_path_;
}
void DoDecode(std::optional<absl::Cord> value,
DecodeReceiver receiver) override {
GetOwningCache(*this).executor()(
[this, value = std::move(value),
receiver = std::move(receiver)]() mutable {
std::shared_ptr<ReadData> read_data;
if (value) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto shard_index,
DecodeShardIndex(*value,
GetOwningCache(*this).shard_index_params()),
static_cast<void>(execution::set_error(receiver, _)));
read_data = std::make_shared<ReadData>(std::move(shard_index));
}
execution::set_value(receiver, std::move(read_data));
});
}
};
Entry* DoAllocateEntry() final { return new Entry; }
size_t DoGetSizeofEntry() final { return sizeof(Entry); }
TransactionNode* DoAllocateTransactionNode(AsyncCache::Entry& entry) final {
ABSL_UNREACHABLE();
}
explicit ShardIndexCache(kvstore::DriverPtr base_kvstore,
std::string base_kvstore_path, Executor executor,
ShardIndexParameters&& params)
: Base(kvstore::DriverPtr(new ShardIndexKeyValueStore(
std::move(base_kvstore), params.index_location,
params.index_codec_state->encoded_size()))),
base_kvstore_path_(std::move(base_kvstore_path)),
executor_(std::move(executor)),
shard_index_params_(std::move(params)) {}
ShardIndexKeyValueStore* shard_index_kvstore_driver() {
return static_cast<ShardIndexKeyValueStore*>(this->Base::kvstore_driver());
}
kvstore::Driver* base_kvstore_driver() {
return shard_index_kvstore_driver()->base();
}
const std::string& base_kvstore_path() const { return base_kvstore_path_; }
const Executor& executor() { return executor_; }
span<const Index> grid_shape() const {
return span<const Index>(shard_index_params_.index_shape.data(),
shard_index_params_.index_shape.size() - 1);
}
const ShardIndexParameters& shard_index_params() const {
return shard_index_params_;
}
std::string base_kvstore_path_;
Executor executor_;
ShardIndexParameters shard_index_params_;
};
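// Write-back cache used for transactional writes to a shard: read data is the
// full set of decoded shard entries, and the mutations of a transaction are
// merged and re-encoded into a single shard value on commit.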
class ShardedKeyValueStoreWriteCache
: public internal::KvsBackedCache<ShardedKeyValueStoreWriteCache,
internal::AsyncCache> {
using Base = internal::KvsBackedCache<ShardedKeyValueStoreWriteCache,
internal::AsyncCache>;
public:
using ReadData = ShardEntries;
explicit ShardedKeyValueStoreWriteCache(
internal::CachePtr<ShardIndexCache> shard_index_cache)
: Base(kvstore::DriverPtr(shard_index_cache->base_kvstore_driver())),
shard_index_cache_(std::move(shard_index_cache)) {}
class Entry : public Base::Entry {
public:
using OwningCache = ShardedKeyValueStoreWriteCache;
size_t ComputeReadDataSizeInBytes(const void* data) override {
return internal::EstimateHeapUsage(*static_cast<const ReadData*>(data));
}
void DoDecode(std::optional<absl::Cord> value,
DecodeReceiver receiver) override {
GetOwningCache(*this).executor()(
[this, value = std::move(value),
receiver = std::move(receiver)]() mutable {
ShardEntries entries;
const auto& shard_index_params =
GetOwningCache(*this).shard_index_params();
if (value) {
TENSORSTORE_ASSIGN_OR_RETURN(
entries, DecodeShard(*value, shard_index_params),
static_cast<void>(execution::set_error(receiver, _)));
} else {
entries.entries.resize(shard_index_params.num_entries);
}
execution::set_value(
receiver, std::make_shared<ShardEntries>(std::move(entries)));
});
}
void DoEncode(std::shared_ptr<const ShardEntries> data,
EncodeReceiver receiver) override {
TENSORSTORE_ASSIGN_OR_RETURN(
auto encoded_shard,
EncodeShard(*data, GetOwningCache(*this).shard_index_params()),
static_cast<void>(execution::set_error(receiver, _)));
execution::set_value(receiver, std::move(encoded_shard));
}
std::string GetKeyValueStoreKey() override {
return GetOwningCache(*this).base_kvstore_path();
}
};
class TransactionNode : public Base::TransactionNode,
public internal_kvstore::AtomicMultiPhaseMutation {
public:
using OwningCache = ShardedKeyValueStoreWriteCache;
using Base::TransactionNode::TransactionNode;
absl::Mutex& mutex() override { return this->mutex_; }
void PhaseCommitDone(size_t next_phase) override {}
internal::TransactionState::Node& GetTransactionNode() override {
return *this;
}
void Abort() override {
this->AbortRemainingPhases();
Base::TransactionNode::Abort();
}
std::string DescribeKey(std::string_view key) override {
auto& cache = GetOwningCache(*this);
return tensorstore::StrCat(
DescribeInternalKey(key, cache.shard_index_params().grid_shape()),
" in ",
cache.kvstore_driver()->DescribeKey(cache.base_kvstore_path()));
}
void DoApply(ApplyOptions options, ApplyReceiver receiver) override;
void StartApply();
void AllEntriesDone(
internal_kvstore::SinglePhaseMutation& single_phase_mutation) override;
void MergeForWriteback(bool conditional);
void RecordEntryWritebackError(
internal_kvstore::ReadModifyWriteEntry& entry,
absl::Status error) override {
absl::MutexLock lock(&mutex_);
if (apply_status_.ok()) {
apply_status_ = std::move(error);
}
}
void Revoke() override {
Base::TransactionNode::Revoke();
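      // Acquiring and releasing the writer lock below acts as a barrier: once
      // it is released, no further entries can be added, so RevokeAllEntries
      // can traverse them without holding the lock.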
{ UniqueWriterLock(*this); }
this->RevokeAllEntries();
}
void WritebackSuccess(ReadState&& read_state) override;
void WritebackError() override;
void InvalidateReadState() override;
bool MultiPhaseReadsCommitted() override { return this->reads_committed_; }
void Read(
internal_kvstore::ReadModifyWriteEntry& entry,
kvstore::ReadModifyWriteTarget::TransactionalReadOptions&& options,
kvstore::ReadModifyWriteTarget::ReadReceiver&& receiver) override {
this->AsyncCache::TransactionNode::Read({options.staleness_bound})
.ExecuteWhenReady(WithExecutor(
GetOwningCache(*this).executor(),
[&entry,
if_not_equal =
std::move(options.generation_conditions.if_not_equal),
receiver = std::move(receiver)](
ReadyFuture<const void> future) mutable {
if (!future.result().ok()) {
execution::set_error(receiver, future.result().status());
return;
}
execution::submit(HandleShardReadSuccess(entry, if_not_equal),
receiver);
}));
}
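    // Converts the cached whole-shard read state into a per-entry
    // `ReadResult` for `entry`, honoring the `if_not_equal` condition.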
static Result<kvstore::ReadResult> HandleShardReadSuccess(
internal_kvstore::ReadModifyWriteEntry& entry,
const StorageGeneration& if_not_equal) {
auto& self = static_cast<TransactionNode&>(entry.multi_phase());
TimestampedStorageGeneration stamp;
std::shared_ptr<const ShardEntries> entries;
{
AsyncCache::ReadLock<ShardEntries> lock{self};
stamp = lock.stamp();
entries = lock.shared_data();
}
if (!StorageGeneration::IsUnknown(stamp.generation) &&
stamp.generation == if_not_equal) {
return kvstore::ReadResult::Unspecified(std::move(stamp));
}
if (StorageGeneration::IsDirty(stamp.generation)) {
stamp.generation =
StorageGeneration::AddLayer(std::move(stamp.generation));
}
auto entry_id = InternalKeyToEntryId(entry.key_);
const auto& shard_entry = entries->entries[entry_id];
if (!shard_entry) {
return kvstore::ReadResult::Missing(std::move(stamp));
} else {
return kvstore::ReadResult::Value(*shard_entry, std::move(stamp));
}
}
ApplyReceiver apply_receiver_;
ApplyOptions apply_options_;
absl::Status apply_status_;
};
Entry* DoAllocateEntry() final { return new Entry; }
size_t DoGetSizeofEntry() final { return sizeof(Entry); }
TransactionNode* DoAllocateTransactionNode(AsyncCache::Entry& entry) final {
return new TransactionNode(static_cast<Entry&>(entry));
}
const internal::CachePtr<ShardIndexCache>& shard_index_cache() const {
return shard_index_cache_;
}
const Executor& executor() { return shard_index_cache()->executor(); }
const ShardIndexParameters& shard_index_params() const {
return shard_index_cache_->shard_index_params();
}
int64_t num_entries_per_shard() const {
return shard_index_cache_->shard_index_params().num_entries;
}
const std::string& base_kvstore_path() const {
return shard_index_cache_->base_kvstore_path();
}
internal::CachePtr<ShardIndexCache> shard_index_cache_;
};
void ShardedKeyValueStoreWriteCache::TransactionNode::InvalidateReadState() {
Base::TransactionNode::InvalidateReadState();
internal_kvstore::InvalidateReadState(phases_);
}
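// Invoked at commit time: records the receiver/options and starts the
// writeback sequence on the executor.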
void ShardedKeyValueStoreWriteCache::TransactionNode::DoApply(
ApplyOptions options, ApplyReceiver receiver) {
apply_receiver_ = std::move(receiver);
apply_options_ = options;
apply_status_ = absl::OkStatus();
GetOwningCache(*this).executor()([this] { this->StartApply(); });
}
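// Re-issues writeback of all buffered mutations with the current staleness
// bound; `AllEntriesDone` runs once every entry in the phase has completed.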
void ShardedKeyValueStoreWriteCache::TransactionNode::StartApply() {
RetryAtomicWriteback(apply_options_.staleness_bound);
}
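// Validates that all conditional mutations observed a single consistent shard
// generation.  On mismatch the apply is restarted with a newer staleness
// bound; if nothing changed, an unconditional no-op update is returned;
// otherwise the existing shard is read (if required) and merged.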
void ShardedKeyValueStoreWriteCache::TransactionNode::AllEntriesDone(
internal_kvstore::SinglePhaseMutation& single_phase_mutation) {
if (!apply_status_.ok()) {
execution::set_error(std::exchange(apply_receiver_, {}),
std::exchange(apply_status_, {}));
return;
}
auto& self = *this;
GetOwningCache(*this).executor()([&self] {
TimestampedStorageGeneration stamp;
bool mismatch = false;
bool modified = false;
int64_t num_entries = 0;
auto& cache = GetOwningCache(self);
const int64_t num_entries_per_shard = cache.num_entries_per_shard();
for (auto& entry : self.phases_.entries_) {
if (entry.entry_type() != kReadModifyWrite) {
auto& dr_entry = static_cast<DeleteRangeEntry&>(entry);
auto [begin_id, end_id] = InternalKeyRangeToEntryRange(
dr_entry.key_, dr_entry.exclusive_max_, num_entries_per_shard);
modified = true;
num_entries += end_id - begin_id;
continue;
}
auto& buffered_entry =
static_cast<AtomicMultiPhaseMutation::BufferedReadModifyWriteEntry&>(
entry);
if (buffered_entry.value_state_ != kvstore::ReadResult::kUnspecified) {
modified = true;
++num_entries;
}
auto& entry_stamp = buffered_entry.stamp();
if (StorageGeneration::IsConditional(entry_stamp.generation)) {
if (!StorageGeneration::IsUnknown(stamp.generation) &&
StorageGeneration::Clean(stamp.generation) !=
StorageGeneration::Clean(entry_stamp.generation)) {
mismatch = true;
break;
} else {
stamp = entry_stamp;
}
}
}
if (mismatch) {
self.apply_options_.staleness_bound = absl::Now();
self.StartApply();
return;
}
if (!modified && StorageGeneration::IsUnknown(stamp.generation) &&
self.apply_options_.apply_mode !=
ApplyOptions::ApplyMode::kSpecifyUnchanged) {
internal::AsyncCache::ReadState update;
update.stamp = TimestampedStorageGeneration::Unconditional();
execution::set_value(std::exchange(self.apply_receiver_, {}),
std::move(update));
return;
}
if (!StorageGeneration::IsUnknown(stamp.generation) ||
num_entries != num_entries_per_shard) {
self.internal::AsyncCache::TransactionNode::Read(
{self.apply_options_.staleness_bound})
.ExecuteWhenReady([&self](ReadyFuture<const void> future) {
if (!future.result().ok()) {
execution::set_error(std::exchange(self.apply_receiver_, {}),
future.result().status());
return;
}
GetOwningCache(self).executor()(
[&self] { self.MergeForWriteback(true); });
});
return;
}
self.MergeForWriteback(false);
});
}
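// Merges the buffered mutations into the existing shard contents (read from
// the cache when `conditional` is true) and delivers the resulting
// `ReadState` to the apply receiver.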
void ShardedKeyValueStoreWriteCache::TransactionNode::MergeForWriteback(
bool conditional) {
TimestampedStorageGeneration stamp;
ShardEntries new_entries;
if (conditional) {
auto lock = internal::AsyncCache::ReadLock<ShardEntries>{*this};
stamp = lock.stamp();
new_entries = *lock.shared_data();
} else {
stamp = TimestampedStorageGeneration::Unconditional();
}
auto& cache = GetOwningCache(*this);
const int64_t num_entries_per_shard = cache.num_entries_per_shard();
const bool has_existing_entries = !new_entries.entries.empty();
new_entries.entries.resize(num_entries_per_shard);
bool mismatch = false;
bool changed = false;
for (auto& entry : phases_.entries_) {
if (entry.entry_type() != kReadModifyWrite) {
auto& dr_entry = static_cast<DeleteRangeEntry&>(entry);
auto [begin_id, end_id] = InternalKeyRangeToEntryRange(
dr_entry.key_, dr_entry.exclusive_max_, num_entries_per_shard);
if (has_existing_entries) {
for (EntryId id = begin_id; id < end_id; ++id) {
new_entries.entries[id] = std::nullopt;
}
}
changed = true;
continue;
}
auto& buffered_entry =
static_cast<internal_kvstore::AtomicMultiPhaseMutation::
BufferedReadModifyWriteEntry&>(entry);
auto& entry_stamp = buffered_entry.stamp();
if (StorageGeneration::IsConditional(entry_stamp.generation) &&
StorageGeneration::Clean(entry_stamp.generation) !=
StorageGeneration::Clean(stamp.generation)) {
mismatch = true;
break;
}
if (buffered_entry.value_state_ == kvstore::ReadResult::kUnspecified ||
!StorageGeneration::IsInnerLayerDirty(entry_stamp.generation)) {
continue;
}
auto entry_id = InternalKeyToEntryId(buffered_entry.key_);
auto& new_entry = new_entries.entries[entry_id];
if (buffered_entry.value_state_ == kvstore::ReadResult::kValue) {
new_entry = buffered_entry.value_;
changed = true;
} else if (new_entry) {
new_entry = std::nullopt;
changed = true;
} else if (!conditional) {
changed = true;
}
}
if (mismatch) {
apply_options_.staleness_bound = absl::Now();
this->StartApply();
return;
}
internal::AsyncCache::ReadState update;
update.stamp = std::move(stamp);
if (changed) {
update.stamp.generation.MarkDirty();
}
update.data = std::make_shared<ShardEntries>(std::move(new_entries));
execution::set_value(std::exchange(apply_receiver_, {}), std::move(update));
}
void ShardedKeyValueStoreWriteCache::TransactionNode::WritebackSuccess(
ReadState&& read_state) {
for (auto& entry : phases_.entries_) {
if (entry.entry_type() != kReadModifyWrite) {
internal_kvstore::WritebackSuccess(static_cast<DeleteRangeEntry&>(entry));
} else {
internal_kvstore::WritebackSuccess(
static_cast<internal_kvstore::ReadModifyWriteEntry&>(entry),
read_state.stamp);
}
}
internal_kvstore::DestroyPhaseEntries(phases_);
Base::TransactionNode::WritebackSuccess(std::move(read_state));
}
void ShardedKeyValueStoreWriteCache::TransactionNode::WritebackError() {
internal_kvstore::WritebackError(phases_);
internal_kvstore::DestroyPhaseEntries(phases_);
Base::TransactionNode::WritebackError();
}
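// JSON-bindable spec data for the "zarr3_sharding_indexed" kvstore driver:
// base kvstore, grid shape, index codecs, index location, and context
// resources.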
struct ShardedKeyValueStoreSpecData {
Context::Resource<internal::CachePoolResource> cache_pool;
Context::Resource<internal::DataCopyConcurrencyResource>
data_copy_concurrency;
kvstore::Spec base;
std::vector<Index> grid_shape;
internal_zarr3::ZarrCodecChainSpec index_codecs;
ShardIndexLocation index_location;
TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(ShardedKeyValueStoreSpecData,
internal_json_binding::NoOptions,
IncludeDefaults,
::nlohmann::json::object_t)
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.cache_pool, x.data_copy_concurrency, x.base, x.grid_shape,
x.index_codecs, x.index_location);
};
};
namespace jb = ::tensorstore::internal_json_binding;
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(
ShardedKeyValueStoreSpecData,
jb::Object(
jb::Member("base",
jb::Projection<&ShardedKeyValueStoreSpecData::base>()),
jb::Member(
"grid_shape",
jb::Projection<&ShardedKeyValueStoreSpecData::grid_shape>(
jb::Validate([](const auto& options,
auto* obj) { return ValidateGridShape(*obj); },
jb::ChunkShapeVector(nullptr)))),
jb::Member("index_codecs",
jb::Projection<&ShardedKeyValueStoreSpecData::index_codecs>(
internal_zarr3::ZarrCodecChainJsonBinder<
false>)),
jb::Member(
"index_location",
jb::Projection<&ShardedKeyValueStoreSpecData::index_location>(
jb::DefaultValue<jb::kAlwaysIncludeDefaults>([](auto* x) {
*x = ShardIndexLocation::kEnd;
}))),
jb::Member(internal::CachePoolResource::id,
jb::Projection<&ShardedKeyValueStoreSpecData::cache_pool>()),
jb::Member(
internal::DataCopyConcurrencyResource::id,
jb::Projection<
&ShardedKeyValueStoreSpecData::data_copy_concurrency>())));
class ShardedKeyValueStoreSpec
: public internal_kvstore::RegisteredDriverSpec<
ShardedKeyValueStoreSpec, ShardedKeyValueStoreSpecData> {
public:
static constexpr char id[] = "zarr3_sharding_indexed";
Future<kvstore::DriverPtr> DoOpen() const override;
Result<kvstore::Spec> GetBase(std::string_view path) const override {
return data_.base;
}
};
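// kvstore driver exposing the entries of a single zarr v3 shard.  Keys are
// fixed-size binary entry ids; reads are batched through `ReadOperationState`
// and writes/deletes are staged in `ShardedKeyValueStoreWriteCache` so that
// each commit rewrites the shard atomically.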
class ShardedKeyValueStore
: public internal_kvstore::RegisteredDriver<ShardedKeyValueStore,
ShardedKeyValueStoreSpec> {
public:
explicit ShardedKeyValueStore(ShardedKeyValueStoreParameters&& params,
std::string_view shared_cache_key = {});
Future<ReadResult> Read(Key key, ReadOptions options) override;
void ListImpl(ListOptions options, ListReceiver receiver) override;
Future<TimestampedStorageGeneration> Write(Key key,
std::optional<Value> value,
WriteOptions options) override;
absl::Status ReadModifyWrite(internal::OpenTransactionPtr& transaction,
size_t& phase, Key key,
ReadModifyWriteSource& source) override;
absl::Status TransactionalDeleteRange(
const internal::OpenTransactionPtr& transaction, KeyRange range) override;
Future<const void> DeleteRange(KeyRange range) override;
std::string DescribeKey(std::string_view key) override;
kvstore::SupportedFeatures GetSupportedFeatures(
const KeyRange& key_range) const final;
Result<KvStore> GetBase(std::string_view path,
const Transaction& transaction) const override;
kvstore::Driver* base_kvstore_driver() const {
return shard_index_cache()->base_kvstore_driver();
}
const ShardIndexParameters& shard_index_params() const {
return shard_index_cache()->shard_index_params();
}
const Executor& executor() const { return shard_index_cache()->executor(); }
const std::string& base_kvstore_path() const {
return shard_index_cache()->base_kvstore_path();
}
const internal::CachePtr<ShardIndexCache>& shard_index_cache() const {
return write_cache_->shard_index_cache_;
}
absl::Status GetBoundSpecData(ShardedKeyValueStoreSpecData& spec) const;
internal::CachePtr<ShardedKeyValueStoreWriteCache> write_cache_;
struct DataForSpec {
Context::Resource<internal::CachePoolResource> cache_pool_resource;
Context::Resource<internal::DataCopyConcurrencyResource>
data_copy_concurrency_resource;
ZarrCodecChainSpec index_codecs;
};
std::unique_ptr<DataForSpec> data_for_spec_;
};
ShardedKeyValueStore::ShardedKeyValueStore(
ShardedKeyValueStoreParameters&& params,
std::string_view shared_cache_key) {
write_cache_ = internal::GetCache<ShardedKeyValueStoreWriteCache>(
params.cache_pool.get(), shared_cache_key, [&] {
return std::make_unique<ShardedKeyValueStoreWriteCache>(
internal::GetCache<ShardIndexCache>(
params.cache_pool.get(), "", [&] {
return std::make_unique<ShardIndexCache>(
std::move(params.base_kvstore),
std::move(params.base_kvstore_path),
std::move(params.executor),
std::move(params.index_params));
}));
});
  this->SetBatchNestingDepth(
      this->base_kvstore_driver()->BatchNestingDepth() +
      // Two additional nesting levels: one batch for the shard index read and
      // one successor batch for the per-entry reads issued afterwards.
      1 + 1);
}
class ReadOperationState;
using ReadOperationStateBase = internal_kvstore_batch::BatchReadEntry<
ShardedKeyValueStore, internal_kvstore_batch::ReadRequest<
EntryId, kvstore::ReadGenerationConditions>>;
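// Batched read state for a single shard.  If the batch requests every entry
// unconditionally, the whole shard is read in one request; otherwise the
// shard index is read first and per-entry byte-range reads follow.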
class ReadOperationState
: public ReadOperationStateBase,
public internal::AtomicReferenceCount<ReadOperationState> {
public:
explicit ReadOperationState(BatchEntryKey&& batch_entry_key_)
: ReadOperationStateBase(std::move(batch_entry_key_)),
        internal::AtomicReferenceCount<ReadOperationState>(1) {}
private:
internal::PinnedCacheEntry<ShardIndexCache> shard_index_cache_entry_;
Batch successor_batch_{no_batch};
void Submit(Batch::View batch) override {
const auto& executor = driver().executor();
executor(
[this, batch = Batch(batch)] { this->ProcessBatch(std::move(batch)); });
}
void ProcessBatch(Batch batch) {
internal::IntrusivePtr<ReadOperationState> self(this,
internal::adopt_object_ref);
if (ShouldReadEntireShard()) {
ReadEntireShard(std::move(self), std::move(batch));
return;
}
shard_index_cache_entry_ =
GetCacheEntry(driver().shard_index_cache(), std::string_view{});
auto shard_index_read_future = shard_index_cache_entry_->Read(
{this->request_batch.staleness_bound, batch});
if (batch) {
if (!shard_index_read_future.ready()) {
successor_batch_ = Batch::New();
} else {
successor_batch_ = std::move(batch);
}
}
std::move(shard_index_read_future)
.ExecuteWhenReady(
[self = std::move(self)](ReadyFuture<const void> future) mutable {
const auto& executor = self->driver().executor();
executor([self = std::move(self), status = future.status()] {
if (!status.ok()) {
internal_kvstore_batch::SetCommonResult<Request>(
self->request_batch.requests, {status});
return;
}
OnShardIndexReady(std::move(self));
});
});
}
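  // True if every entry of the shard is requested with a full byte range and
  // identical generation conditions, in which case reading the entire shard
  // once is cheaper than consulting the shard index first.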
bool ShouldReadEntireShard() {
const int64_t num_entries_per_shard =
driver().shard_index_params().num_entries;
if (request_batch.requests.size() < num_entries_per_shard) {
return false;
}
const auto& first_request = request_batch.requests[0];
BitVec<> covered_entries(num_entries_per_shard);
int64_t num_covered = 0;
for (const auto& request : request_batch.requests) {
if (std::get<kvstore::ReadGenerationConditions>(request) !=
std::get<kvstore::ReadGenerationConditions>(first_request)) {
return false;
}
if (std::get<internal_kvstore_batch::ByteRangeReadRequest>(request)
.byte_range.IsFull()) {
auto ref = covered_entries[std::get<EntryId>(request)];
if (!ref) ++num_covered;
ref = true;
}
}
if (num_covered != num_entries_per_shard) {
return false;
}
return true;
}
static void ReadEntireShard(internal::IntrusivePtr<ReadOperationState> self,
Batch batch) {
auto& first_request = self->request_batch.requests[0];
kvstore::ReadOptions read_options;
read_options.batch = std::move(batch);
read_options.generation_conditions =
std::move(std::get<kvstore::ReadGenerationConditions>(first_request));
read_options.staleness_bound = self->request_batch.staleness_bound;
auto& driver = self->driver();
driver.base_kvstore_driver()
->Read(driver.base_kvstore_path(), std::move(read_options))
.ExecuteWhenReady([self = std::move(self)](
ReadyFuture<kvstore::ReadResult> future) mutable {
const auto& executor = self->driver().executor();
executor([self = std::move(self), future = std::move(future)] {
OnFullShardReady(std::move(self), std::move(future.result()));
});
});
}
static void OnFullShardReady(internal::IntrusivePtr<ReadOperationState> self,
Result<kvstore::ReadResult>&& result) {
if (!result.ok() || !result->has_value()) {
internal_kvstore_batch::SetCommonResult(self->request_batch.requests,
std::move(result));
return;
}
auto& read_result = *result;
TENSORSTORE_ASSIGN_OR_RETURN(
auto shard_index,
DecodeShardIndexFromFullShard(read_result.value,
self->driver().shard_index_params()),
internal_kvstore_batch::SetCommonResult(self->request_batch.requests,
_));
const auto complete_request = [&](Request& request) {
auto& byte_range_request =
std::get<internal_kvstore_batch::ByteRangeReadRequest>(request);
const auto index_entry = shard_index[std::get<EntryId>(request)];
if (index_entry.IsMissing()) {
byte_range_request.promise.SetResult(
kvstore::ReadResult::Missing(read_result.stamp));
return;
}
TENSORSTORE_RETURN_IF_ERROR(
index_entry.Validate(std::get<EntryId>(request),
read_result.value.size()),
static_cast<void>(byte_range_request.promise.SetResult(_)));
TENSORSTORE_ASSIGN_OR_RETURN(
auto validated_byte_range,
byte_range_request.byte_range.Validate(index_entry.length),
static_cast<void>(byte_range_request.promise.SetResult(_)));
validated_byte_range.inclusive_min += index_entry.offset;
validated_byte_range.exclusive_max += index_entry.offset;
kvstore::ReadResult request_read_result;
request_read_result.stamp = read_result.stamp;
request_read_result.state = kvstore::ReadResult::kValue;
request_read_result.value =
internal::GetSubCord(read_result.value, validated_byte_range);
byte_range_request.promise.SetResult(std::move(request_read_result));
};
for (auto& request : self->request_batch.requests) {
complete_request(request);
}
}
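  // Completes each pending request using the decoded shard index: missing
  // entries resolve immediately, present entries trigger conditional
  // byte-range reads of the base kvstore (restarted if the generation
  // changes).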
static void OnShardIndexReady(
internal::IntrusivePtr<ReadOperationState> self) {
std::shared_ptr<const ShardIndex> shard_index;
TimestampedStorageGeneration stamp;
{
auto lock = internal::AsyncCache::ReadLock<ShardIndexCache::ReadData>(
*self->shard_index_cache_entry_);
stamp = lock.stamp();
shard_index = lock.shared_data();
}
assert(!StorageGeneration::IsUnknown(stamp.generation));
if (!shard_index) {
internal_kvstore_batch::SetCommonResult(
self->request_batch.requests,
kvstore::ReadResult::Missing(std::move(stamp)));
return;
}
auto successor_batch = std::move(self->successor_batch_);
if (successor_batch) {
self->successor_batch_ = Batch::New();
}
const auto process_request = [&](Request& request) {
ShardIndexEntry index_entry = ShardIndexEntry::Missing();
kvstore::ReadResult::State state;
if (!std::get<kvstore::ReadGenerationConditions>(request).Matches(
stamp.generation)) {
state = kvstore::ReadResult::kUnspecified;
} else {
index_entry = (*shard_index)[std::get<EntryId>(request)];
state = kvstore::ReadResult::kMissing;
}
auto& byte_range_request =
std::get<internal_kvstore_batch::ByteRangeReadRequest>(request);
if (index_entry.IsMissing()) {
byte_range_request.promise.SetResult(
kvstore::ReadResult{state, {}, stamp});
return;
}
TENSORSTORE_RETURN_IF_ERROR(
index_entry.Validate(std::get<EntryId>(request)),
static_cast<void>(byte_range_request.promise.SetResult(
                  self->shard_index_cache_entry_->AnnotateError(_, true))));
assert(byte_range_request.byte_range.SatisfiesInvariants());
TENSORSTORE_ASSIGN_OR_RETURN(
auto validated_byte_range,
byte_range_request.byte_range.Validate(index_entry.length),
static_cast<void>(byte_range_request.promise.SetResult(_)));
if (validated_byte_range.inclusive_min ==
validated_byte_range.exclusive_max) {
byte_range_request.promise.SetResult(kvstore::ReadResult{
kvstore::ReadResult::kValue, absl::Cord(), stamp});
return;
}
kvstore::ReadOptions kvs_read_options;
kvs_read_options.generation_conditions.if_equal = stamp.generation;
kvs_read_options.staleness_bound = self->request_batch.staleness_bound;
kvs_read_options.batch = successor_batch;
kvs_read_options.byte_range =
ByteRange{static_cast<int64_t>(index_entry.offset +
validated_byte_range.inclusive_min),
static_cast<int64_t>(index_entry.offset +
validated_byte_range.exclusive_max)};
self->driver()
.base_kvstore_driver()
->Read(std::string(self->driver().base_kvstore_path()),
std::move(kvs_read_options))
.ExecuteWhenReady([self, &request](ReadyFuture<kvstore::ReadResult>
future) mutable {
const auto& status = future.status();
if (!status.ok()) {
std::get<internal_kvstore_batch::ByteRangeReadRequest>(request)
.promise.SetResult(status);
return;
}
const auto& executor = self->driver().executor();
executor([self = std::move(self), &request,
future = std::move(future)] {
OnValueReady(std::move(self), request, std::move(future.value()));
});
});
};
for (auto& request : self->request_batch.requests) {
process_request(request);
}
}
static void OnValueReady(internal::IntrusivePtr<ReadOperationState> self,
Request& request, kvstore::ReadResult&& value) {
if (value.aborted()) {
MakeRequest<ReadOperationState>(self->driver(), self->successor_batch_,
value.stamp.time, std::move(request));
return;
}
std::get<internal_kvstore_batch::ByteRangeReadRequest>(request)
.promise.SetResult(std::move(value));
}
};
Future<kvstore::ReadResult> ShardedKeyValueStore::Read(Key key,
ReadOptions options) {
TENSORSTORE_ASSIGN_OR_RETURN(
EntryId entry_id,
KeyToEntryIdOrError(key, shard_index_params().grid_shape()));
auto [promise, future] = PromiseFuturePair<kvstore::ReadResult>::Make();
ReadOperationState::MakeRequest<ReadOperationState>(
*this, options.batch, options.staleness_bound,
ReadOperationState::Request{{std::move(promise), options.byte_range},
{entry_id},
std::move(options.generation_conditions)});
return std::move(future);
}
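// List implementation: reads the shard index and emits a `ListEntry` for each
// present entry id within the requested range, with the configured prefix
// stripped from the key.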
struct ListOperationState
: public internal::FlowSenderOperationState<kvstore::ListEntry> {
using Base = internal::FlowSenderOperationState<kvstore::ListEntry>;
using Base::Base;
internal::PinnedCacheEntry<ShardIndexCache> shard_index_cache_entry_;
kvstore::ListOptions options_;
static void Start(ShardedKeyValueStore& store, kvstore::ListOptions&& options,
ListReceiver&& receiver) {
options.range = KeyRangeToInternalKeyRange(
options.range, store.shard_index_params().grid_shape());
auto self =
internal::MakeIntrusivePtr<ListOperationState>(std::move(receiver));
self->options_ = std::move(options);
self->shard_index_cache_entry_ =
GetCacheEntry(store.shard_index_cache(), std::string_view{});
auto shard_index_read_future =
self->shard_index_cache_entry_->Read({self->options_.staleness_bound});
auto* self_ptr = self.get();
LinkValue(
WithExecutor(store.executor(),
[self = std::move(self)](Promise<void> promise,
ReadyFuture<const void> future) {
if (self->cancelled()) return;
self->OnShardIndexReady();
}),
self_ptr->promise, std::move(shard_index_read_future));
}
void OnShardIndexReady() {
auto shard_index =
internal::AsyncCache::ReadLock<ShardIndex>(*shard_index_cache_entry_)
.shared_data();
if (!shard_index) {
return;
}
const auto& shard_index_params =
GetOwningCache(*shard_index_cache_entry_).shard_index_params();
span<const Index> grid_shape = shard_index_params.grid_shape();
auto start_index = InternalKeyToEntryId(options_.range.inclusive_min);
auto end_index = InternalKeyToEntryId(options_.range.exclusive_max);
auto& receiver = shared_receiver->receiver;
for (EntryId i = start_index; i < end_index; ++i) {
auto index_entry = (*shard_index)[i];
if (index_entry.IsMissing()) continue;
auto key = EntryIdToKey(i, grid_shape);
key.erase(0, options_.strip_prefix_length);
execution::set_value(receiver,
ListEntry{
std::move(key),
ListEntry::checked_size(index_entry.length),
});
}
}
};
void ShardedKeyValueStore::ListImpl(ListOptions options,
ListReceiver receiver) {
ListOperationState::Start(*this, std::move(options), std::move(receiver));
}
Future<TimestampedStorageGeneration> ShardedKeyValueStore::Write(
Key key, std::optional<Value> value, WriteOptions options) {
return internal_kvstore::WriteViaTransaction(
this, std::move(key), std::move(value), std::move(options));
}
absl::Status ShardedKeyValueStore::ReadModifyWrite(
internal::OpenTransactionPtr& transaction, size_t& phase, Key key,
ReadModifyWriteSource& source) {
TENSORSTORE_ASSIGN_OR_RETURN(
EntryId entry_id,
KeyToEntryIdOrError(key, shard_index_params().grid_shape()));
key = EntryIdToInternalKey(entry_id);
auto entry = GetCacheEntry(write_cache_, std::string_view{});
TENSORSTORE_ASSIGN_OR_RETURN(
auto node, GetWriteLockedTransactionNode(*entry, transaction));
node->ReadModifyWrite(phase, std::move(key), source);
if (!transaction) {
transaction.reset(node.unlock()->transaction());
}
return absl::OkStatus();
}
absl::Status ShardedKeyValueStore::TransactionalDeleteRange(
const internal::OpenTransactionPtr& transaction, KeyRange range) {
range = KeyRangeToInternalKeyRange(range, shard_index_params().grid_shape());
auto entry = GetCacheEntry(write_cache_, std::string_view{});
TENSORSTORE_ASSIGN_OR_RETURN(
auto node, GetWriteLockedTransactionNode(*entry, transaction));
node->DeleteRange(std::move(range));
return absl::OkStatus();
}
Future<const void> ShardedKeyValueStore::DeleteRange(KeyRange range) {
range = KeyRangeToInternalKeyRange(range, shard_index_params().grid_shape());
internal::OpenTransactionPtr transaction;
auto entry = GetCacheEntry(write_cache_, std::string_view{});
TENSORSTORE_ASSIGN_OR_RETURN(
auto node, GetWriteLockedTransactionNode(*entry, transaction));
node->DeleteRange(std::move(range));
return node->transaction()->future();
}
std::string ShardedKeyValueStore::DescribeKey(std::string_view key) {
return tensorstore::StrCat(
zarr3_sharding_indexed::DescribeKey(key,
shard_index_params().grid_shape()),
" in ", base_kvstore_driver()->DescribeKey(base_kvstore_path()));
}
kvstore::SupportedFeatures ShardedKeyValueStore::GetSupportedFeatures(
const KeyRange& key_range) const {
return base_kvstore_driver()->GetSupportedFeatures(
KeyRange::Singleton(base_kvstore_path()));
}
Result<KvStore> ShardedKeyValueStore::GetBase(
std::string_view path, const Transaction& transaction) const {
return KvStore(kvstore::DriverPtr(base_kvstore_driver()), base_kvstore_path(),
transaction);
}
}
}
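// Garbage collection for `ShardedKeyValueStore` simply visits the base
// kvstore driver.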
namespace garbage_collection {
template <>
struct GarbageCollection<zarr3_sharding_indexed::ShardedKeyValueStore> {
static void Visit(GarbageCollectionVisitor& visitor,
const zarr3_sharding_indexed::ShardedKeyValueStore& value) {
garbage_collection::GarbageCollectionVisit(visitor,
*value.base_kvstore_driver());
}
};
}
namespace zarr3_sharding_indexed {
absl::Status ShardedKeyValueStore::GetBoundSpecData(
ShardedKeyValueStoreSpecData& spec) const {
if (!data_for_spec_) {
return absl::UnimplementedError("");
}
TENSORSTORE_ASSIGN_OR_RETURN(spec.base.driver,
base_kvstore_driver()->GetBoundSpec());
spec.base.path = base_kvstore_path();
spec.data_copy_concurrency = data_for_spec_->data_copy_concurrency_resource;
spec.cache_pool = data_for_spec_->cache_pool_resource;
spec.index_codecs = data_for_spec_->index_codecs;
const auto& shard_index_params = this->shard_index_params();
spec.index_location = shard_index_params.index_location;
spec.grid_shape.assign(shard_index_params.index_shape.begin(),
shard_index_params.index_shape.end() - 1);
return absl::OkStatus();
}
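// Opens the base kvstore and then constructs the sharded driver, using a
// cache key derived from the base kvstore and spec options so that equivalent
// drivers share the same caches.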
Future<kvstore::DriverPtr> ShardedKeyValueStoreSpec::DoOpen() const {
ShardIndexParameters index_params;
index_params.index_location = data_.index_location;
TENSORSTORE_RETURN_IF_ERROR(
index_params.Initialize(data_.index_codecs, data_.grid_shape));
return MapFutureValue(
InlineExecutor{},
[spec = internal::IntrusivePtr<const ShardedKeyValueStoreSpec>(this),
index_params =
std::move(index_params)](kvstore::KvStore& base_kvstore) mutable
-> Result<kvstore::DriverPtr> {
std::string cache_key;
internal::EncodeCacheKey(
&cache_key, base_kvstore.driver, base_kvstore.path,
spec->data_.data_copy_concurrency, spec->data_.grid_shape,
spec->data_.index_codecs);
ShardedKeyValueStoreParameters params;
params.base_kvstore = std::move(base_kvstore.driver);
params.base_kvstore_path = std::move(base_kvstore.path);
params.executor = spec->data_.data_copy_concurrency->executor;
params.cache_pool = *spec->data_.cache_pool;
params.index_params = std::move(index_params);
auto driver = internal::MakeIntrusivePtr<ShardedKeyValueStore>(
std::move(params), cache_key);
driver->data_for_spec_.reset(new ShardedKeyValueStore::DataForSpec{
spec->data_.cache_pool,
spec->data_.data_copy_concurrency,
spec->data_.index_codecs,
});
return driver;
},
kvstore::Open(data_.base));
}
kvstore::DriverPtr GetShardedKeyValueStore(
ShardedKeyValueStoreParameters&& parameters) {
return kvstore::DriverPtr(new ShardedKeyValueStore(std::move(parameters)));
}
}
}
namespace {
const tensorstore::internal_kvstore::DriverRegistration<
tensorstore::zarr3_sharding_indexed::ShardedKeyValueStoreSpec>
registration;
}
#include "tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed.h"
#include <stddef.h>
#include <stdint.h>
#include <functional>
#include <initializer_list>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/log/absl_check.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "re2/re2.h"
#include "riegeli/bytes/cord_writer.h"
#include "riegeli/bytes/write.h"
#include "riegeli/digests/crc32c_digester.h"
#include "tensorstore/batch.h"
#include "tensorstore/context.h"
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/kvs_backed_cache_testutil.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/riegeli/digest_suffixed_writer.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/internal/thread/thread_pool.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/memory/memory_key_value_store.h"
#include "tensorstore/kvstore/mock_kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/key.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
namespace kvstore = ::tensorstore::kvstore;
using ::tensorstore::Batch;
using ::tensorstore::Executor;
using ::tensorstore::Future;
using ::tensorstore::Index;
using ::tensorstore::KvStore;
using ::tensorstore::MatchesStatus;
using ::tensorstore::OptionalByteRangeRequest;
using ::tensorstore::Result;
using ::tensorstore::span;
using ::tensorstore::StorageGeneration;
using ::tensorstore::TimestampedStorageGeneration;
using ::tensorstore::Transaction;
using ::tensorstore::internal::CachePool;
using ::tensorstore::internal::GetCache;
using ::tensorstore::internal::KvsBackedTestCache;
using ::tensorstore::internal::MatchesKvsReadResult;
using ::tensorstore::internal::MatchesKvsReadResultNotFound;
using ::tensorstore::internal::MatchesTimestampedStorageGeneration;
using ::tensorstore::internal::MockKeyValueStore;
using ::tensorstore::internal::UniqueNow;
using ::tensorstore::internal_zarr3::ZarrCodecChainSpec;
using ::tensorstore::kvstore::ReadResult;
using ::tensorstore::zarr3_sharding_indexed::EntryId;
using ::tensorstore::zarr3_sharding_indexed::EntryIdToKey;
using ::tensorstore::zarr3_sharding_indexed::GetShardedKeyValueStore;
using ::tensorstore::zarr3_sharding_indexed::ShardedKeyValueStoreParameters;
using ::tensorstore::zarr3_sharding_indexed::ShardIndexLocation;
constexpr CachePool::Limits kSmallCacheLimits{10000000};
absl::Cord Bytes(std::initializer_list<unsigned char> x) {
return absl::Cord(std::string(x.begin(), x.end()));
}
absl::Cord WithCrc32c(absl::Cord input) {
absl::Cord output;
riegeli::CordWriter writer{&output};
TENSORSTORE_CHECK_OK(riegeli::Write(
input, tensorstore::internal::DigestSuffixedWriter<
riegeli::Crc32cDigester,
tensorstore::internal::LittleEndianDigestWriter>{&writer}));
ABSL_CHECK(writer.Close());
return output;
}
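// Maps arbitrary test keys to entry-id keys within the shard grid, assigning
// entry ids either sequentially or at random, so generic kvstore tests can be
// run against the sharded store.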
class GetKey {
public:
GetKey(bool sequential, std::vector<Index> grid_shape)
: sequential_(sequential),
grid_shape_(std::move(grid_shape)),
num_entries_(
tensorstore::ProductOfExtents(span<const Index>(grid_shape_))) {}
std::string operator()(std::string key) const {
auto it = key_to_entry_id_.find(key);
if (it == key_to_entry_id_.end()) {
ABSL_CHECK_LT(entry_id_to_key_.size(), num_entries_);
while (true) {
auto x = sequential_ ? next_entry_id_++ : absl::Uniform<EntryId>(gen_);
x = x % num_entries_;
if (entry_id_to_key_.emplace(x, key).second) {
it = key_to_entry_id_.emplace(key, x).first;
break;
}
}
}
return EntryIdToKey(it->second, grid_shape_);
}
private:
bool sequential_;
std::vector<Index> grid_shape_;
EntryId num_entries_;
mutable EntryId next_entry_id_ = 0;
mutable absl::BitGen gen_;
mutable absl::flat_hash_map<std::string, EntryId> key_to_entry_id_;
mutable absl::flat_hash_map<EntryId, std::string> entry_id_to_key_;
};
kvstore::DriverPtr GetDefaultStore(kvstore::DriverPtr base_kvstore,
std::string base_kvstore_path,
Executor executor,
CachePool::StrongPtr cache_pool,
const std::vector<Index>& grid_shape) {
ShardedKeyValueStoreParameters params;
params.base_kvstore = base_kvstore;
params.base_kvstore_path = base_kvstore_path;
params.executor = executor;
params.cache_pool = CachePool::WeakPtr(cache_pool);
TENSORSTORE_CHECK_OK_AND_ASSIGN(
auto index_codecs,
ZarrCodecChainSpec::FromJson(
{{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}},
{{"name", "crc32c"}}}));
params.index_params.index_location = ShardIndexLocation::kEnd;
TENSORSTORE_CHECK_OK(
params.index_params.Initialize(index_codecs, grid_shape));
return GetShardedKeyValueStore(std::move(params));
}
TEST(ShardedKeyValueStoreTest, BasicFunctionality) {
std::vector<std::pair<std::string, tensorstore::Executor>> executors{
{"inline", tensorstore::InlineExecutor{}},
{"thread_pool", tensorstore::internal::DetachedThreadPool(2)}};
for (const auto& [executor_name, executor] : executors) {
for (const auto sequential_ids : {true, false}) {
auto cache_pool = CachePool::Make(kSmallCacheLimits);
auto base_kv_store = tensorstore::GetMemoryKeyValueStore();
const int64_t num_entries = 100;
SCOPED_TRACE(executor_name);
auto store = GetDefaultStore(base_kv_store, "shard_path", executor,
cache_pool, {num_entries});
GetKey get_key_fn(sequential_ids, {num_entries});
tensorstore::internal::TestKeyValueReadWriteOps(store, get_key_fn);
}
}
}
TEST(ShardedKeyValueStoreTest, DescribeKey) {
CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits);
kvstore::DriverPtr base_kv_store = tensorstore::GetMemoryKeyValueStore();
int64_t num_entries = 100;
std::vector<Index> grid_shape{num_entries};
kvstore::DriverPtr store =
GetDefaultStore(base_kv_store, "shard_path",
tensorstore::InlineExecutor{}, cache_pool, grid_shape);
for (const auto& [key, description] :
std::vector<std::pair<uint32_t, std::string>>{
{0, "shard entry {0}/{100} in \"shard_path\""},
{1, "shard entry {1}/{100} in \"shard_path\""},
}) {
EXPECT_EQ(description, store->DescribeKey(EntryIdToKey(key, grid_shape)));
}
}
class RawEncodingTest : public ::testing::Test {
protected:
CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits);
kvstore::DriverPtr base_kv_store = tensorstore::GetMemoryKeyValueStore();
kvstore::DriverPtr GetStore(const std::vector<Index>& grid_shape) {
return GetDefaultStore(base_kv_store, "shard_path",
tensorstore::InlineExecutor{}, cache_pool,
grid_shape);
}
};
TEST_F(RawEncodingTest, MultipleUnconditionalWrites) {
std::vector<Index> grid_shape{100};
kvstore::DriverPtr store = GetStore(grid_shape);
std::vector<absl::Cord> values{absl::Cord("abc"), absl::Cord("aaaaa"),
absl::Cord("efgh")};
std::vector<Future<TimestampedStorageGeneration>> futures;
auto key = EntryIdToKey(10, grid_shape);
tensorstore::Transaction txn(tensorstore::isolated);
for (auto value : values) {
futures.push_back(kvstore::WriteCommitted(KvStore{store, txn}, key, value));
}
txn.CommitAsync().IgnoreFuture();
std::vector<Result<TimestampedStorageGeneration>> results;
for (const auto& future : futures) {
results.push_back(future.result());
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto shard_read,
base_kv_store->Read("shard_path").result());
EXPECT_THAT(
results,
::testing::UnorderedElementsAre(
MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
MatchesTimestampedStorageGeneration(shard_read.stamp.generation)));
for (size_t i = 0; i < results.size(); ++i) {
if (results[i] && results[i]->generation == shard_read.stamp.generation) {
EXPECT_THAT(store->Read(key).result(),
MatchesKvsReadResult(values[i], results[i]->generation));
}
}
}
TEST_F(RawEncodingTest, List) {
std::vector<Index> grid_shape{100};
kvstore::DriverPtr store = GetStore(grid_shape);
std::map<std::string, absl::Cord> values{
{EntryIdToKey(1, grid_shape), absl::Cord("a")},
{EntryIdToKey(2, grid_shape), absl::Cord("bc")},
{EntryIdToKey(3, grid_shape), absl::Cord("def")},
{EntryIdToKey(10, grid_shape), absl::Cord("xyz")}};
for (auto [key, value] : values) {
TENSORSTORE_EXPECT_OK(store->Write(key, value));
}
EXPECT_THAT(tensorstore::internal::GetMap(store),
::testing::Optional(::testing::ElementsAreArray(values)));
}
TEST_F(RawEncodingTest, WritesAndDeletes) {
std::vector<Index> grid_shape{100};
kvstore::DriverPtr store = GetStore(grid_shape);
StorageGeneration gen1, gen2, gen3;
{
tensorstore::Transaction txn(tensorstore::isolated);
auto init_future1 = kvstore::WriteCommitted(
KvStore{store, txn}, EntryIdToKey(1, grid_shape), absl::Cord("a"));
auto init_future2 = kvstore::WriteCommitted(
KvStore{store, txn}, EntryIdToKey(2, grid_shape), absl::Cord("bc"));
auto init_future3 = kvstore::WriteCommitted(
KvStore{store, txn}, EntryIdToKey(3, grid_shape), absl::Cord("def"));
txn.CommitAsync().IgnoreFuture();
gen1 = init_future1.value().generation;
gen2 = init_future2.value().generation;
gen3 = init_future3.value().generation;
}
tensorstore::Transaction txn(tensorstore::isolated);
auto future1 =
kvstore::DeleteCommitted(KvStore{store, txn}, EntryIdToKey(1, grid_shape),
{StorageGeneration::NoValue()});
auto future2 =
kvstore::WriteCommitted(KvStore{store, txn}, EntryIdToKey(2, grid_shape),
absl::Cord("ww"), {gen2});
auto future3 =
kvstore::WriteCommitted(KvStore{store, txn}, EntryIdToKey(2, grid_shape),
absl::Cord("xx"), {gen2});
auto future4 =
kvstore::WriteCommitted(KvStore{store, txn}, EntryIdToKey(4, grid_shape),
absl::Cord("zz"), {StorageGeneration::NoValue()});
auto future5 = kvstore::DeleteCommitted(KvStore{store, txn},
EntryIdToKey(3, grid_shape), {gen3});
txn.CommitAsync().IgnoreFuture();
EXPECT_THAT(future1.result(), MatchesTimestampedStorageGeneration(
StorageGeneration::Unknown()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto shard_read,
base_kv_store->Read("shard_path").result());
EXPECT_THAT(
std::vector({future2.result(), future3.result()}),
::testing::UnorderedElementsAre(
MatchesTimestampedStorageGeneration(StorageGeneration::Unknown()),
MatchesTimestampedStorageGeneration(shard_read.stamp.generation)));
EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(),
MatchesKvsReadResult(absl::Cord("a")));
EXPECT_THAT(store->Read(EntryIdToKey(2, grid_shape)).result(),
MatchesKvsReadResult(
!StorageGeneration::IsUnknown(future2.result()->generation)
? absl::Cord("ww")
: absl::Cord("xx")));
EXPECT_THAT(store->Read(EntryIdToKey(3, grid_shape)).result(),
MatchesKvsReadResultNotFound());
EXPECT_THAT(store->Read(EntryIdToKey(4, grid_shape)).result(),
MatchesKvsReadResult(absl::Cord("zz")));
}
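// Runs `op0` and `op1` in both possible orders (calling `init` before and
// `finalize` after each round) and returns the results observed for each
// ordering.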
std::vector<std::vector<Result<TimestampedStorageGeneration>>>
TestOrderDependentWrites(
std::function<void()> init,
std::function<Future<TimestampedStorageGeneration>()> op0,
std::function<Future<TimestampedStorageGeneration>()> op1,
std::function<void()> finalize) {
std::vector<std::vector<Result<TimestampedStorageGeneration>>> all_results;
for (int i = 0; i < 2; ++i) {
std::vector<Future<TimestampedStorageGeneration>> futures(2);
init();
if (i == 0) {
futures[0] = op0();
futures[1] = op1();
} else {
futures[1] = op1();
futures[0] = op0();
}
finalize();
all_results.push_back({futures[0].result(), futures[1].result()});
}
return all_results;
}
TEST_F(RawEncodingTest, WriteThenDelete) {
std::vector<Index> grid_shape{100};
kvstore::DriverPtr store = GetStore(grid_shape);
TENSORSTORE_ASSERT_OK(
store->Write(EntryIdToKey(1, grid_shape), absl::Cord("a")).result());
EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(),
MatchesKvsReadResult(absl::Cord("a")));
TENSORSTORE_ASSERT_OK(store->Delete(EntryIdToKey(1, grid_shape)).result());
EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(),
MatchesKvsReadResultNotFound());
}
TEST_F(RawEncodingTest, MultipleDeleteExisting) {
std::vector<Index> grid_shape{100};
kvstore::DriverPtr store = GetStore(grid_shape);
StorageGeneration gen;
tensorstore::Transaction txn{tensorstore::no_transaction};
EXPECT_THAT(
TestOrderDependentWrites(
[&] {
gen = store->Write(EntryIdToKey(1, grid_shape), absl::Cord("a"))
.value()
.generation;
txn = tensorstore::Transaction(tensorstore::isolated);
},
[&] {
return kvstore::DeleteCommitted(KvStore{store, txn},
EntryIdToKey(1, grid_shape),
{gen});
},
[&] {
return kvstore::DeleteCommitted(
KvStore{store, txn}, EntryIdToKey(1, grid_shape),
{StorageGeneration::NoValue()});
},
[&] { txn.CommitAsync().IgnoreFuture(); }),
::testing::UnorderedElementsAre(
::testing::ElementsAre(
MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
MatchesTimestampedStorageGeneration(
StorageGeneration::NoValue())),
::testing::ElementsAre(
MatchesTimestampedStorageGeneration(StorageGeneration::NoValue()),
MatchesTimestampedStorageGeneration(
StorageGeneration::Unknown()))));
}
TEST_F(RawEncodingTest, WriteWithUnmatchedConditionAfterDelete) {
std::vector<Index> grid_shape{100};
kvstore::DriverPtr store = GetStore(grid_shape);
tensorstore::Transaction txn{tensorstore::no_transaction};
EXPECT_THAT(
TestOrderDependentWrites(
[&] {
store->Delete(EntryIdToKey(0, grid_shape)).value();
txn = tensorstore::Transaction(tensorstore::isolated);
},
[&] {
return kvstore::WriteCommitted(KvStore{store, txn},
EntryIdToKey(0, grid_shape),
absl::Cord("a"));
},
[&] {
return kvstore::WriteCommitted(
KvStore{store, txn}, EntryIdToKey(0, grid_shape),
absl::Cord("b"),
{StorageGeneration::FromString("g")});
},
[&] { txn.CommitAsync().IgnoreFuture(); }),
::testing::Each(::testing::ElementsAre(
MatchesTimestampedStorageGeneration(
::testing::AllOf(::testing::Not(StorageGeneration::NoValue()),
::testing::Not(StorageGeneration::Invalid()))),
MatchesTimestampedStorageGeneration(StorageGeneration::Unknown()))));
}
TEST_F(RawEncodingTest, MultipleDeleteNonExisting) {
std::vector<Index> grid_shape{100};
kvstore::DriverPtr store = GetStore(grid_shape);
tensorstore::Transaction txn(tensorstore::isolated);
std::vector futures{
kvstore::DeleteCommitted(KvStore{store, txn}, EntryIdToKey(1, grid_shape),
{StorageGeneration::NoValue()}),
kvstore::DeleteCommitted(KvStore{store, txn}, EntryIdToKey(1, grid_shape),
{StorageGeneration::NoValue()})};
txn.CommitAsync().IgnoreFuture();
std::vector results{futures[0].result(), futures[1].result()};
EXPECT_THAT(
results,
::testing::UnorderedElementsAre(
MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
MatchesTimestampedStorageGeneration(StorageGeneration::NoValue())));
}
TEST_F(RawEncodingTest, ShardIndexTooShort) {
std::vector<Index> grid_shape{100};
kvstore::DriverPtr store = GetStore(grid_shape);
base_kv_store->Write("shard_path", Bytes({1, 2, 3})).value();
EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
RE2::QuoteMeta("Error reading shard index in \"shard_path\": "
"Requested byte range [-1604, ?) is not valid "
"for value of size 3")));
EXPECT_THAT(
store->Write(EntryIdToKey(10, grid_shape), absl::Cord("abc")).result(),
MatchesStatus(absl::StatusCode::kDataLoss,
"Error reading \"shard_path\": "
"Existing shard has size of 3 bytes, but expected at least "
"1604 bytes"));
}
TEST_F(RawEncodingTest, ShardIndexByteRangeOverflow) {
std::vector<Index> grid_shape{2};
kvstore::DriverPtr store = GetStore(grid_shape);
auto content = WithCrc32c(Bytes({
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
}));
TENSORSTORE_ASSERT_OK(base_kv_store->Write("shard_path", content));
EXPECT_THAT(
store->Read(EntryIdToKey(1, grid_shape)).result(),
MatchesStatus(absl::StatusCode::kDataLoss,
"Error reading shard index in \"shard_path\": "
"Invalid shard index entry 1 with offset=.*, length=.*"));
}
TEST_F(RawEncodingTest, ShardIndexEntryByteRangeOutOfRange) {
std::vector<Index> grid_shape{2};
kvstore::DriverPtr store = GetStore(grid_shape);
auto content = WithCrc32c(Bytes({
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0, 0, 0, 0, 0, 0, 0, 0,
37, 0, 0, 0, 0, 0, 0, 0,
}));
TENSORSTORE_ASSERT_OK(base_kv_store->Write("shard_path", content));
EXPECT_THAT(
store->Write(EntryIdToKey(1, grid_shape), absl::Cord("x")).result(),
MatchesStatus(absl::StatusCode::kDataLoss,
"Error reading \"shard_path\": "
"Shard index entry 1 with byte range .* is invalid "
"for shard of size .*"));
}
TEST_F(RawEncodingTest, ShardIndexInvalidChecksum) {
std::vector<Index> grid_shape{2};
kvstore::DriverPtr store = GetStore(grid_shape);
auto content = Bytes({
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0, 0, 0, 0, 0, 0, 0, 0,
5, 0, 0, 0, 0, 0, 0, 0,
});
content.Append("abcd");
TENSORSTORE_ASSERT_OK(base_kv_store->Write("shard_path", content));
EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(),
MatchesStatus(absl::StatusCode::kDataLoss,
"Error reading shard index in \"shard_path\": "
"Digest mismatch.*"));
}
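// Fixture backing the sharded store with a `MockKeyValueStore`, so tests can
// inspect and answer the exact requests issued to the base store.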
class UnderlyingKeyValueStoreTest : public ::testing::Test {
protected:
CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits);
MockKeyValueStore::MockPtr mock_store = MockKeyValueStore::Make();
kvstore::DriverPtr GetStore(std::vector<Index> grid_shape) {
return GetDefaultStore(mock_store, "shard_path",
tensorstore::InlineExecutor{}, cache_pool,
grid_shape);
}
std::vector<Index> grid_shape{5};
kvstore::DriverPtr store = GetStore(grid_shape);
};
TEST_F(UnderlyingKeyValueStoreTest, Read) {
absl::Time init_time = UniqueNow();
absl::Time shard_index_time;
{
auto future = store->Read(EntryIdToKey(2, grid_shape), {});
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest::SuffixLength(5 * 16 + 4),
req.options.byte_range);
EXPECT_THAT(req.options.staleness_bound, ::testing::Gt(init_time));
shard_index_time = absl::Now();
req.promise.SetResult(
ReadResult{ReadResult::kValue,
WithCrc32c(Bytes({
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
10, 0, 0, 0, 0, 0, 0, 0,
5, 0, 0, 0, 0, 0, 0, 0,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
})),
{StorageGeneration::FromString("g0"), shard_index_time}});
}
ASSERT_FALSE(future.ready()) << future.status();
absl::Time read_time;
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(10, 15), req.options.byte_range);
read_time = absl::Now();
req.promise.SetResult(
ReadResult{ReadResult::kValue,
Bytes({5, 6, 7, 8, 9}),
{StorageGeneration::FromString("g0"), read_time}});
}
ASSERT_EQ(0, mock_store->read_requests.size());
ASSERT_TRUE(future.ready());
EXPECT_THAT(
future.result(),
MatchesKvsReadResult(Bytes({5, 6, 7, 8, 9}),
StorageGeneration::FromString("g0"), read_time));
}
{
kvstore::ReadOptions options;
options.staleness_bound = init_time;
auto future = store->Read(EntryIdToKey(3, grid_shape), options);
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesKvsReadResultNotFound(shard_index_time));
}
{
auto req_time = UniqueNow();
auto future = store->Read(EntryIdToKey(3, grid_shape), {});
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest::SuffixLength(5 * 16 + 4),
req.options.byte_range);
EXPECT_THAT(req.options.staleness_bound, ::testing::Gt(req_time));
shard_index_time = absl::Now();
req.promise.SetResult(ReadResult::Unspecified(
{StorageGeneration::FromString("g0"), shard_index_time}));
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesKvsReadResultNotFound(shard_index_time));
}
{
kvstore::ReadOptions options;
options.staleness_bound = init_time;
auto future = store->Read(EntryIdToKey(2, grid_shape), options);
absl::Time read_time;
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(10, 15), req.options.byte_range);
EXPECT_EQ(init_time, req.options.staleness_bound);
read_time = absl::Now();
req.promise.SetResult(
ReadResult{ReadResult::kValue,
Bytes({5, 6, 7, 8, 9}),
{StorageGeneration::FromString("g0"), read_time}});
}
ASSERT_EQ(0, mock_store->read_requests.size());
ASSERT_TRUE(future.ready());
EXPECT_THAT(
future.result(),
MatchesKvsReadResult(Bytes({5, 6, 7, 8, 9}),
StorageGeneration::FromString("g0"), read_time));
}
{
kvstore::ReadOptions options;
options.staleness_bound = init_time;
auto future = store->Read(EntryIdToKey(2, grid_shape), options);
absl::Time abort_time;
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(init_time, req.options.staleness_bound);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(10, 15), req.options.byte_range);
abort_time = absl::Now();
req.promise.SetResult(ReadResult::Unspecified(
{StorageGeneration::FromString("g0"), abort_time}));
}
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest::SuffixLength(5 * 16 + 4),
req.options.byte_range);
EXPECT_THAT(req.options.staleness_bound, ::testing::Ge(abort_time));
shard_index_time = absl::Now();
req.promise.SetResult(
ReadResult{ReadResult::kValue,
WithCrc32c(Bytes({
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
10, 0, 0, 0, 0, 0, 0, 0,
6, 0, 0, 0, 0, 0, 0, 0,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
})),
{StorageGeneration::FromString("g1"), shard_index_time}});
}
absl::Time read_time;
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::FromString("g1"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(10, 16), req.options.byte_range);
read_time = absl::Now();
req.promise.SetResult(
ReadResult{ReadResult::kValue,
Bytes({4, 5, 6, 7, 8, 9}),
{StorageGeneration::FromString("g1"), read_time}});
}
ASSERT_EQ(0, mock_store->read_requests.size());
ASSERT_TRUE(future.ready());
EXPECT_THAT(
future.result(),
MatchesKvsReadResult(Bytes({4, 5, 6, 7, 8, 9}),
StorageGeneration::FromString("g1"), read_time));
}
}
TEST_F(UnderlyingKeyValueStoreTest, TransactionReadThenCommit) {
tensorstore::Transaction txn(tensorstore::isolated);
auto memory_store = tensorstore::GetMemoryKeyValueStore();
{
auto future =
kvstore::Read(KvStore{store, txn}, EntryIdToKey(2, grid_shape), {});
{
auto req = mock_store->read_requests.pop();
req(memory_store);
ASSERT_EQ(0, mock_store->read_requests.size());
}
EXPECT_THAT(future.result(),
::testing::Optional(MatchesKvsReadResultNotFound()));
}
auto commit_future = txn.CommitAsync();
TENSORSTORE_ASSERT_OK(commit_future.result());
EXPECT_EQ(0, mock_store->read_requests.size());
}
TEST_F(UnderlyingKeyValueStoreTest,
       ReadConcurrentDeleteAfterReadingShardIndex) {
auto req_time = UniqueNow();
auto future = store->Read(EntryIdToKey(2, grid_shape), {});
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_THAT(req.options.staleness_bound, ::testing::Gt(req_time));
EXPECT_EQ(OptionalByteRangeRequest::SuffixLength(5 * 16 + 4),
req.options.byte_range);
req.promise.SetResult(
ReadResult{ReadResult::kValue,
WithCrc32c(Bytes({
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
10, 0, 0, 0, 0, 0, 0, 0,
5, 0, 0, 0, 0, 0, 0, 0,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
})),
{StorageGeneration::FromString("g0"), absl::Now()}});
}
absl::Time read_time;
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(10, 15), req.options.byte_range);
read_time = absl::Now();
req.promise.SetResult(ReadResult{
ReadResult::kMissing, {}, {StorageGeneration::NoValue(), read_time}});
}
ASSERT_EQ(0, mock_store->read_requests.size());
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(), MatchesKvsReadResultNotFound(read_time));
}
TEST_F(UnderlyingKeyValueStoreTest, ReadErrorReadingShardIndex) {
auto future = store->Read(EntryIdToKey(2, grid_shape), {});
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(OptionalByteRangeRequest::SuffixLength(5 * 16 + 4),
req.options.byte_range);
req.promise.SetResult(absl::UnknownError("Read error"));
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kUnknown,
"Error reading shard index in \"shard_path\": "
"Read error"));
}
TEST_F(UnderlyingKeyValueStoreTest, ReadErrorReadingData) {
auto future = store->Read(EntryIdToKey(0x2, grid_shape), {});
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(OptionalByteRangeRequest::SuffixLength(5 * 16 + 4),
req.options.byte_range);
req.promise.SetResult(
ReadResult{ReadResult::kValue,
WithCrc32c(Bytes({
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
10, 0, 0, 0, 0, 0, 0, 0,
5, 0, 0, 0, 0, 0, 0, 0,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
})),
{StorageGeneration::FromString("g0"), absl::Now()}});
}
ASSERT_FALSE(future.ready()) << future.status();
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(OptionalByteRangeRequest(10, 15), req.options.byte_range);
req.promise.SetResult(absl::UnknownError("Read error"));
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kUnknown, "Read error"));
}
TEST_F(UnderlyingKeyValueStoreTest, ReadInvalidKey) {
auto future = store->Read("abc", {});
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST_F(UnderlyingKeyValueStoreTest, WriteInvalidKey) {
auto future = store->Write("abc", absl::Cord("x"));
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST_F(UnderlyingKeyValueStoreTest, DeleteInvalidKey) {
auto future = store->Delete("abc");
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST_F(UnderlyingKeyValueStoreTest, WriteWithNoExistingShard) {
grid_shape = {2};
store = GetStore(grid_shape);
auto future = store->Write(EntryIdToKey(1, grid_shape), Bytes({1, 2, 3}));
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
req.promise.SetResult(ReadResult{
ReadResult::kMissing, {}, {StorageGeneration::NoValue(), absl::Now()}});
}
absl::Time write_time;
{
auto req = mock_store->write_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->write_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(StorageGeneration::NoValue(),
req.options.generation_conditions.if_equal);
auto expected = Bytes({
1, 2, 3,
});
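    // Appended shard index: entry 0 unallocated (0xff), entry 1 at offset 0
    // with length 3, as little-endian uint64 pairs wrapped with a CRC-32C.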
expected.Append(WithCrc32c(Bytes({
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
})));
EXPECT_THAT(req.value, ::testing::Optional(expected));
write_time = absl::Now();
req.promise.SetResult(std::in_place, StorageGeneration::FromString("g0"),
write_time);
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesTimestampedStorageGeneration(
StorageGeneration::FromString("g0"), write_time));
}
TEST_F(UnderlyingKeyValueStoreTest, UnconditionalWrite) {
grid_shape = {2};
store = GetStore(grid_shape);
auto txn = Transaction(tensorstore::isolated);
auto future1 = kvstore::WriteCommitted(
KvStore{store, txn}, EntryIdToKey(0, grid_shape), Bytes({1, 2, 3}));
auto future2 = kvstore::WriteCommitted(
KvStore{store, txn}, EntryIdToKey(1, grid_shape), Bytes({4, 5, 6}));
ASSERT_EQ(0, mock_store->read_requests.size());
ASSERT_EQ(0, mock_store->write_requests.size());
txn.CommitAsync().IgnoreFuture();
ASSERT_EQ(0, mock_store->read_requests.size());
absl::Time write_time;
{
auto req = mock_store->write_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->write_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
auto expected = Bytes({
1, 2, 3,
4, 5, 6,
});
expected.Append(WithCrc32c(Bytes({
0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
})));
EXPECT_THAT(req.value, ::testing::Optional(expected));
write_time = absl::Now();
req.promise.SetResult(std::in_place, StorageGeneration::FromString("g0"),
write_time);
}
ASSERT_TRUE(future1.ready());
ASSERT_TRUE(future2.ready());
EXPECT_THAT(future1.result(),
MatchesTimestampedStorageGeneration(
StorageGeneration::FromString("g0"), write_time));
EXPECT_THAT(future2.result(),
MatchesTimestampedStorageGeneration(
StorageGeneration::FromString("g0"), write_time));
}
TEST_F(UnderlyingKeyValueStoreTest, ConditionalWriteDespiteMaxChunks) {
grid_shape = {2};
store = GetStore(grid_shape);
auto future = store->Write(EntryIdToKey(0, grid_shape), Bytes({1, 2, 3}),
{StorageGeneration::NoValue()});
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
req.promise.SetResult(ReadResult{
ReadResult::kMissing, {}, {StorageGeneration::NoValue(), absl::Now()}});
}
{
auto req = mock_store->write_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->write_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(StorageGeneration::NoValue(),
req.options.generation_conditions.if_equal);
}
}
TEST_F(UnderlyingKeyValueStoreTest, WriteWithNoExistingShardError) {
auto future = store->Write(EntryIdToKey(1, grid_shape), Bytes({1, 2, 3}));
future.Force();
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
req.promise.SetResult(ReadResult{
ReadResult::kMissing, {}, {StorageGeneration::NoValue(), absl::Now()}});
}
{
auto req = mock_store->write_requests.pop_nonblock().value();
req.promise.SetResult(absl::UnknownError("Write error"));
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(), MatchesStatus(absl::StatusCode::kUnknown,
"Error writing \"shard_path\": "
"Write error"));
}
TEST_F(UnderlyingKeyValueStoreTest, WriteWithExistingShard) {
grid_shape = {2};
store = GetStore(grid_shape);
auto future = store->Write(EntryIdToKey(0, grid_shape), Bytes({1, 2, 3}));
ASSERT_FALSE(future.ready()) << future.status();
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
auto content = Bytes({
4, 5, 6,
});
content.Append(WithCrc32c(Bytes({
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
})));
req.promise.SetResult(
ReadResult{ReadResult::kValue,
content,
{StorageGeneration::FromString("g0"), absl::Now()}});
}
ASSERT_FALSE(future.ready()) << future.status();
absl::Time write_time;
{
auto req = mock_store->write_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->write_requests.size());
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_equal);
auto content = Bytes({
1, 2, 3,
4, 5, 6,
});
content.Append(WithCrc32c(Bytes({
0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
})));
EXPECT_THAT(req.value, content);
write_time = absl::Now();
req.promise.SetResult(std::in_place, StorageGeneration::FromString("g1"),
write_time);
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesTimestampedStorageGeneration(
StorageGeneration::FromString("g1"), write_time));
}
TEST_F(UnderlyingKeyValueStoreTest, WriteWithExistingShardReadError) {
auto future = store->Write(EntryIdToKey(1, grid_shape), Bytes({1, 2, 3}));
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
req.promise.SetResult(absl::UnknownError("Read error"));
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(), MatchesStatus(absl::StatusCode::kUnknown,
"Error reading \"shard_path\": "
"Read error"));
}
TEST_F(UnderlyingKeyValueStoreTest, DeleteRangeWhenEmpty) {
grid_shape = {2};
store = GetStore(grid_shape);
auto future = store->DeleteRange({});
future.Force();
{
auto req = mock_store->write_requests.pop();
ASSERT_EQ(0, mock_store->write_requests.size());
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_TRUE(StorageGeneration::IsUnknown(
req.options.generation_conditions.if_equal));
EXPECT_EQ(std::nullopt, req.value);
req.promise.SetResult(std::in_place, StorageGeneration::FromString("g1"),
absl::Now());
}
ASSERT_TRUE(future.ready());
TENSORSTORE_ASSERT_OK(future);
}
TEST_F(UnderlyingKeyValueStoreTest, BatchRead) {
cache_pool = CachePool::Make({});
auto memory_store = tensorstore::GetMemoryKeyValueStore();
mock_store->forward_to = memory_store;
mock_store->log_requests = true;
mock_store->handle_batch_requests = true;
grid_shape = {3};
store = GetStore(grid_shape);
TENSORSTORE_ASSERT_OK(
store->Write(EntryIdToKey(0, grid_shape), absl::Cord("abc")).result());
TENSORSTORE_ASSERT_OK(
store->Write(EntryIdToKey(1, grid_shape), absl::Cord("def")).result());
mock_store->request_log.pop_all();
{
std::vector<Future<kvstore::ReadResult>> futures;
{
kvstore::ReadOptions options;
options.batch = Batch::New();
futures = {
store->Read(EntryIdToKey(0, grid_shape), options),
store->Read(EntryIdToKey(1, grid_shape), options),
};
}
EXPECT_THAT(futures[0].result(), MatchesKvsReadResult(absl::Cord("abc")));
EXPECT_THAT(futures[1].result(), MatchesKvsReadResult(absl::Cord("def")));
EXPECT_THAT(mock_store->request_log.pop_all(), ::testing::SizeIs(2));
}
{
std::vector<Future<kvstore::ReadResult>> futures;
{
kvstore::ReadOptions options;
options.batch = Batch::New();
futures = {
store->Read(EntryIdToKey(0, grid_shape), options),
store->Read(EntryIdToKey(1, grid_shape), options),
store->Read(EntryIdToKey(2, grid_shape), options),
};
}
EXPECT_THAT(futures[0].result(), MatchesKvsReadResult(absl::Cord("abc")));
EXPECT_THAT(futures[1].result(), MatchesKvsReadResult(absl::Cord("def")));
EXPECT_THAT(futures[2].result(), MatchesKvsReadResultNotFound());
EXPECT_THAT(mock_store->request_log.pop_all(), ::testing::SizeIs(1));
}
{
std::vector<Future<kvstore::ReadResult>> futures;
{
kvstore::ReadOptions options1;
options1.batch = Batch::New();
kvstore::ReadOptions options2;
options2.batch = options1.batch;
options2.generation_conditions.if_not_equal =
StorageGeneration::Invalid();
futures = {
store->Read(EntryIdToKey(0, grid_shape), options1),
store->Read(EntryIdToKey(1, grid_shape), options1),
store->Read(EntryIdToKey(2, grid_shape), options2),
};
}
EXPECT_THAT(futures[0].result(), MatchesKvsReadResult(absl::Cord("abc")));
EXPECT_THAT(futures[1].result(), MatchesKvsReadResult(absl::Cord("def")));
EXPECT_THAT(futures[2].result(), MatchesKvsReadResultNotFound());
EXPECT_THAT(mock_store->request_log.pop_all(), ::testing::SizeIs(2));
}
}
class ReadModifyWriteTest : public ::testing::Test {
protected:
MockKeyValueStore::MockPtr mock_store = MockKeyValueStore::Make();
tensorstore::kvstore::DriverPtr memory_store =
tensorstore::GetMemoryKeyValueStore();
kvstore::DriverPtr GetStore(int64_t num_entries = 100) {
return GetDefaultStore(mock_store, "shard_path",
tensorstore::InlineExecutor{},
CachePool::Make(CachePool::Limits{}), {num_entries});
}
auto GetKvsBackedCache(kvstore::DriverPtr store = {}) {
if (!store) store = GetStore();
return GetCache<KvsBackedTestCache>(
CachePool::Make(CachePool::Limits{}).get(), "",
[&] { return std::make_unique<KvsBackedTestCache>(store); });
}
};
TEST_F(ReadModifyWriteTest, MultipleCaches) {
std::vector<Index> grid_shape{100};
auto cache1 = GetKvsBackedCache();
auto cache2 = GetKvsBackedCache();
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(GetCacheEntry(cache1, EntryIdToKey(0x0, grid_shape))
->Modify(open_transaction, false, "abc"));
TENSORSTORE_ASSERT_OK(GetCacheEntry(cache2, EntryIdToKey(0x0, grid_shape))
->Modify(open_transaction, false, "def"));
auto read_future = GetCacheEntry(cache1, EntryIdToKey(0x0, grid_shape))
->ReadValue(open_transaction);
mock_store->read_requests.pop()(memory_store);
mock_store->read_requests.pop()(memory_store);
EXPECT_THAT(read_future.result(),
::testing::Optional(absl::Cord("abcdef")));
}
transaction.CommitAsync().IgnoreFuture();
auto write_req = mock_store->write_requests.pop();
write_req(memory_store);
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(ReadModifyWriteTest, MultiplePhasesMultipleCaches) {
std::vector<Index> grid_shape{100};
auto cache1 = GetKvsBackedCache();
auto cache2 = GetKvsBackedCache();
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(GetCacheEntry(cache1, EntryIdToKey(0x0, grid_shape))
->Modify(open_transaction, false, "abc"));
TENSORSTORE_ASSERT_OK(GetCacheEntry(cache2, EntryIdToKey(0x0, grid_shape))
->Modify(open_transaction, false, "def"));
open_transaction->Barrier();
TENSORSTORE_ASSERT_OK(GetCacheEntry(cache1, EntryIdToKey(0x0, grid_shape))
->Modify(open_transaction, false, "ghi"));
TENSORSTORE_ASSERT_OK(GetCacheEntry(cache2, EntryIdToKey(0x0, grid_shape))
->Modify(open_transaction, false, "jkl"));
auto read_future = GetCacheEntry(cache1, EntryIdToKey(0x0, grid_shape))
->ReadValue(open_transaction);
mock_store->read_requests.pop()(memory_store);
mock_store->read_requests.pop()(memory_store);
EXPECT_THAT(read_future.result(),
::testing::Optional(absl::Cord("abcdefghijkl")));
}
transaction.CommitAsync().IgnoreFuture();
mock_store->write_requests.pop()(memory_store);
mock_store->read_requests.pop()(memory_store);
mock_store->read_requests.pop()(memory_store);
mock_store->read_requests.pop()(memory_store);
mock_store->write_requests.pop()(memory_store);
TENSORSTORE_EXPECT_OK(transaction.future());
}
TENSORSTORE_GLOBAL_INITIALIZER {
using ::tensorstore::internal::KvsBackedCacheBasicTransactionalTestOptions;
using ::tensorstore::internal::RegisterKvsBackedCacheBasicTransactionalTest;
for (bool underlying_atomic : {false, true}) {
KvsBackedCacheBasicTransactionalTestOptions options;
const int64_t num_entries = 100;
options.test_name = tensorstore::StrCat(
"ZarrShardingIndexed/underlying_atomic=", underlying_atomic);
options.get_store = [=] {
return GetDefaultStore(
tensorstore::GetMemoryKeyValueStore(underlying_atomic),
"shard_path", tensorstore::InlineExecutor{},
CachePool::Make(CachePool::Limits{}), {num_entries});
};
options.delete_range_supported = true;
options.multi_key_atomic_supported = true;
options.get_key_getter = [=] {
return [getter = std::make_shared<GetKey>(
true, std::vector<Index>{num_entries})](
auto key) { return (*getter)(key); };
};
RegisterKvsBackedCacheBasicTransactionalTest(options);
}
}
TEST(ShardedKeyValueStoreTest, SpecRoundtrip) {
tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
options.roundtrip_key = std::string(8, '\0');
options.full_base_spec = {{"driver", "memory"}, {"path", "shard_path"}};
options.full_spec = {
{"driver", "zarr3_sharding_indexed"},
{"base", options.full_base_spec},
{"grid_shape", {100, 200}},
{"index_location", "end"},
{"index_codecs",
{{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}}}},
};
options.check_data_after_serialization = false;
tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
TEST(ShardedKeyValueStoreTest, SpecRoundtripFile) {
tensorstore::internal_testing::ScopedTemporaryDirectory tempdir;
tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
options.roundtrip_key = std::string(8, '\0');
options.full_base_spec = {{"driver", "file"},
{"path", tempdir.path() + "/shard_path"}};
options.full_spec = {
{"driver", "zarr3_sharding_indexed"},
{"base", options.full_base_spec},
{"grid_shape", {100, 200}},
{"index_location", "end"},
{"index_codecs",
{{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}}}},
};
tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
TEST(ShardedKeyValueStoreTest, Base) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto spec,
kvstore::Spec::FromJson(
{{"driver", "zarr3_sharding_indexed"},
{"base", "memory:
{"grid_shape", {100, 200}},
{"index_location", "end"},
{"index_codecs",
{{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}}}},
{"path", "1"}}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_spec,
kvstore::Spec::FromJson("memory:
EXPECT_THAT(spec.base(), ::testing::Optional(base_spec));
auto context = tensorstore::Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store,
kvstore::Open(spec, context).result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_store,
kvstore::Open(base_spec, context).result());
EXPECT_THAT(store.base(), ::testing::Optional(base_store));
auto transaction = tensorstore::Transaction(tensorstore::atomic_isolated);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store_with_txn, store | transaction);
EXPECT_THAT(store_with_txn.base(), base_store | transaction);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
c2c26fb1-265a-4feb-9f4d-8261e92daeb4 | cpp | tensorflow/tensorflow | tf_dialect_to_executor | tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor.cc | tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor_test.cc | #include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor.h"
#include <memory>
#include <string>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/Passes.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/logging_hooks.h"
#include "xla/tsl/lib/monitoring/counter.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/platform/error_logging.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
using mlir::LogicalResult;
using mlir::ModuleOp;
using mlir::OpPassManager;
using mlir::PassManager;
using mlir::func::FuncOp;
auto *tf_dialect_to_executor_dialect_status = tsl::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/api/v2/tf_dialect_to_executor_dialect_status",
"Counts how often a successful export from TF Dialect to Executor Dialect "
"is",
"status");
constexpr char kExportSuccess[] = "success";
constexpr char kExportFailed[] = "failed";
namespace {
void AddTfDialectToExecutorPasses(OpPassManager &pm) {
pm.addPass(mlir::TF::CreateTFRegionControlFlowToFunctional());
pm.addNestedPass<FuncOp>(
mlir::CreateFunctionalToExecutorDialectConversionPass());
pm.addNestedPass<FuncOp>(mlir::TF::CreateSplitIntoIslandPerOpPass());
pm.addNestedPass<FuncOp>(mlir::TFDevice::CreateReplicateToIslandPass(
false));
pm.addNestedPass<FuncOp>(
mlir::TFDevice::CreateReplicaIDToDeviceOrdinalPass());
pm.addNestedPass<FuncOp>(mlir::TFDevice::CreateParallelExecuteToIslandsPass(
false));
pm.addNestedPass<FuncOp>(mlir::TFDevice::CreateLaunchToDeviceAttributePass(
false));
pm.addPass(
mlir::tf_executor::CreateTFExecutorUpdateControlDependenciesPass());
pm.addNestedPass<FuncOp>(mlir::TFTPU::CreateTPUDevicePropagationPass());
pm.addNestedPass<FuncOp>(mlir::TFTPU::CreateTPUColocateSplitsPass());
pm.addPass(mlir::createSymbolDCEPass());
pm.addNestedPass<FuncOp>(
mlir::tf_executor::CreateTFExecutorGraphPruningPass());
if (tensorflow::GetMlirCommonFlags()
->tf_mlir_enable_convert_control_to_data_outputs_pass) {
bool composite_tpuexecute_side_effects =
tensorflow::GetMlirCommonFlags()
->tf_mlir_enable_composite_tpuexecute_side_effects;
pm.addPass(
mlir::tf_executor::CreateTFExecutorConvertControlToDataOutputsPass(
composite_tpuexecute_side_effects));
}
pm.addPass(mlir::TF::CreateVerifySuitableForExportPass());
}
tensorflow::Status RecordStatusIfError(absl::Status status) {
if (status.ok()) {
return absl::OkStatus();
}
tf_dialect_to_executor_dialect_status->GetCell(kExportFailed)->IncrementBy(1);
VLOG(1) << "Failed to export from TF Dialect to TF Executor Dialect. "
<< status;
constexpr char bridge_subcomponent[] =
"TFXLA_TF_FUNCTIONAL_TO_EXECUTOR_EXPORT_v2";
constexpr char kBridgeComponent[] = "TFXLABridge";
tsl::OkOrSetErrorCounterPayload(
tensorflow::core::platform::ErrorSourceProto::MLIR_BRIDGE_PHASE_1,
status);
tsl::error_logging::Log(kBridgeComponent, bridge_subcomponent,
status.ToString())
.IgnoreError();
return status;
}
}
tensorflow::Status ExportFromTensorflowDialectToExecutor(
ModuleOp module, llvm::StringRef module_name) {
PassManager tf_to_executor(module.getContext());
::tensorflow::applyTensorflowAndCLOptions(tf_to_executor);
tf_to_executor.enableVerifier();
AddTfDialectToExecutorPasses(tf_to_executor);
if (VLOG_IS_ON(1) ||
DEBUG_DATA_DUMPER()->ShouldDump(module_name.str(), kDebugGroupMain)) {
::tensorflow::DumpMlirOpToFile(
DEBUG_DATA_DUMPER()->GetDumpFilename(
module_name.str(), kDebugGroupMain,
"tfxla_bridge_v2_tfdialect_to_executor_before"),
module, llvm::StringRef(), &tf_to_executor);
if (VLOG_IS_ON(2) ||
DEBUG_DATA_DUMPER()->ShouldDump(
module_name.str(), kDebugGroupBridgePhase1ExecutorExport)) {
internal::EnablePassIRPrinting(
tf_to_executor, kDebugGroupBridgePhase1ExecutorExport, module_name);
}
}
LogicalResult result = tf_to_executor.run(module);
if (VLOG_IS_ON(1) ||
DEBUG_DATA_DUMPER()->ShouldDump(module_name.str(), kDebugGroupMain)) {
::tensorflow::DumpMlirOpToFile(
DEBUG_DATA_DUMPER()->GetDumpFilename(
module_name.str(), kDebugGroupMain,
"tfxla_bridge_v2_tfdialect_to_executor_after"),
module, llvm::StringRef(), &tf_to_executor);
}
if (result.failed()) {
return RecordStatusIfError(
absl::InternalError("Failed to export from TF Dialect to TF Executor "
"Dialect. Read LLVM Pipeline Error"));
}
tf_dialect_to_executor_dialect_status->GetCell(kExportSuccess)
->IncrementBy(1);
return absl::OkStatus();
}
mlir::PassPipelineRegistration<> tf_dialect_to_executor_pipeline(
"tf-dialect-to-executor-v2",
"Run passes to convert from TF Dialect to Executor in preparation for "
"exporting module back to TF Graph.",
AddTfDialectToExecutorPasses);
}
}
} | #include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor.h"
#include <stdlib.h>
#include <cstdint>
#include <string>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/testing/utils.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/resource_loader.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
namespace {
constexpr char kExportStreamzName[] =
"/tensorflow/core/tf2xla/api/v2/tf_dialect_to_executor_dialect_status";
constexpr char kExportSuccess[] = "success";
constexpr char kExportFailed[] = "failed";
using mlir::DialectRegistry;
using mlir::MLIRContext;
using mlir::ModuleOp;
using mlir::OwningOpRef;
using ::tensorflow::monitoring::testing::CellReader;
using ::tensorflow::tf2xla::v2::testing::TestDataPath;
size_t CountSubstring(absl::string_view str, absl::string_view substr) {
size_t count = 0;
size_t idx = str.find(substr);
while (idx != std::string::npos) {
count++;
idx = str.find(substr, idx + 1);
}
return count;
}
class TensorflowDialectToExecutorTest : public ::testing::Test {
public:
TensorflowDialectToExecutorTest() {
mlir::RegisterCommonToolingDialects(registry_);
context_.appendDialectRegistry(registry_);
context_.loadAllAvailableDialects();
}
absl::Status CreateMlirModule(std::string mlir_module_filename) {
std::string mlir_module_path = TestDataPath() + mlir_module_filename;
mlir_module_ =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context_);
if (!mlir_module_) {
return absl::Status(
absl::StatusCode::kNotFound,
absl::StrCat("Could not find MLIR module at ", mlir_module_path));
}
return absl::OkStatus();
}
DialectRegistry registry_;
MLIRContext context_;
OwningOpRef<mlir::ModuleOp> mlir_module_;
};
TEST_F(TensorflowDialectToExecutorTest, ConvertsToExecutor) {
CellReader<int64_t> compilation_status(kExportStreamzName);
TF_ASSERT_OK(CreateMlirModule("empty_func.mlir"));
TF_EXPECT_OK(ExportFromTensorflowDialectToExecutor(*mlir_module_));
EXPECT_EQ(compilation_status.Delta(kExportSuccess), 1);
EXPECT_EQ(compilation_status.Delta(kExportFailed), 0);
}
TEST_F(TensorflowDialectToExecutorTest, ErrorsWhenCannotConvert) {
CellReader<int64_t> compilation_status(kExportStreamzName);
TF_ASSERT_OK(CreateMlirModule("invalid_executor.mlir"));
EXPECT_FALSE(ExportFromTensorflowDialectToExecutor(*mlir_module_).ok());
EXPECT_EQ(compilation_status.Delta(kExportSuccess), 0);
EXPECT_EQ(compilation_status.Delta(kExportFailed), 1);
}
TEST_F(TensorflowDialectToExecutorTest, PrunesDeadOps) {
CellReader<int64_t> compilation_status(kExportStreamzName);
TF_ASSERT_OK(CreateMlirModule("func_with_dead_ops.mlir"));
TF_EXPECT_OK(ExportFromTensorflowDialectToExecutor(*mlir_module_));
std::string module_dump;
llvm::raw_string_ostream raw_stream(module_dump);
mlir_module_->print(raw_stream);
EXPECT_EQ(compilation_status.Delta(kExportSuccess), 1);
EXPECT_EQ(compilation_status.Delta(kExportFailed), 0);
EXPECT_EQ(
CountSubstring(module_dump, "tf_executor.island wraps \"tf.Concat\""), 2);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
01db218e-89d2-4dfd-b6a3-1d368f61f0d1 | cpp | abseil/abseil-cpp | fast_type_id | absl/base/internal/fast_type_id.h | absl/base/internal/fast_type_id_test.cc | #ifndef ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
#define ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
template <typename Type>
struct FastTypeTag {
constexpr static char dummy_var = 0;
};
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
template <typename Type>
constexpr char FastTypeTag<Type>::dummy_var;
#endif
using FastTypeIdType = const void*;
template <typename Type>
constexpr inline FastTypeIdType FastTypeId() {
return &FastTypeTag<Type>::dummy_var;
}
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/base/internal/fast_type_id.h"
#include <cstdint>
#include <map>
#include <vector>
#include "gtest/gtest.h"
namespace {
namespace bi = absl::base_internal;
#define PRIM_TYPES(A) \
A(bool) \
A(short) \
A(unsigned short) \
A(int) \
A(unsigned int) \
A(long) \
A(unsigned long) \
A(long long) \
A(unsigned long long) \
A(float) \
A(double) \
A(long double)
TEST(FastTypeIdTest, PrimitiveTypes) {
bi::FastTypeIdType type_ids[] = {
#define A(T) bi::FastTypeId<T>(),
PRIM_TYPES(A)
#undef A
#define A(T) bi::FastTypeId<const T>(),
PRIM_TYPES(A)
#undef A
#define A(T) bi::FastTypeId<volatile T>(),
PRIM_TYPES(A)
#undef A
#define A(T) bi::FastTypeId<const volatile T>(),
PRIM_TYPES(A)
#undef A
};
size_t total_type_ids = sizeof(type_ids) / sizeof(bi::FastTypeIdType);
for (int i = 0; i < total_type_ids; ++i) {
EXPECT_EQ(type_ids[i], type_ids[i]);
for (int j = 0; j < i; ++j) {
EXPECT_NE(type_ids[i], type_ids[j]);
}
}
}
#define FIXED_WIDTH_TYPES(A) \
A(int8_t) \
A(uint8_t) \
A(int16_t) \
A(uint16_t) \
A(int32_t) \
A(uint32_t) \
A(int64_t) \
A(uint64_t)
TEST(FastTypeIdTest, FixedWidthTypes) {
bi::FastTypeIdType type_ids[] = {
#define A(T) bi::FastTypeId<T>(),
FIXED_WIDTH_TYPES(A)
#undef A
#define A(T) bi::FastTypeId<const T>(),
FIXED_WIDTH_TYPES(A)
#undef A
#define A(T) bi::FastTypeId<volatile T>(),
FIXED_WIDTH_TYPES(A)
#undef A
#define A(T) bi::FastTypeId<const volatile T>(),
FIXED_WIDTH_TYPES(A)
#undef A
};
size_t total_type_ids = sizeof(type_ids) / sizeof(bi::FastTypeIdType);
for (int i = 0; i < total_type_ids; ++i) {
EXPECT_EQ(type_ids[i], type_ids[i]);
for (int j = 0; j < i; ++j) {
EXPECT_NE(type_ids[i], type_ids[j]);
}
}
}
TEST(FastTypeIdTest, AliasTypes) {
using int_alias = int;
EXPECT_EQ(bi::FastTypeId<int_alias>(), bi::FastTypeId<int>());
}
TEST(FastTypeIdTest, TemplateSpecializations) {
EXPECT_NE(bi::FastTypeId<std::vector<int>>(),
bi::FastTypeId<std::vector<long>>());
EXPECT_NE((bi::FastTypeId<std::map<int, float>>()),
(bi::FastTypeId<std::map<int, double>>()));
}
struct Base {};
struct Derived : Base {};
struct PDerived : private Base {};
TEST(FastTypeIdTest, Inheritance) {
EXPECT_NE(bi::FastTypeId<Base>(), bi::FastTypeId<Derived>());
EXPECT_NE(bi::FastTypeId<Base>(), bi::FastTypeId<PDerived>());
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/internal/fast_type_id.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/internal/fast_type_id_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
3b4a14ae-fa50-425d-8ad4-aec55977753d | cpp | tensorflow/tensorflow | metric_def | tensorflow/core/lib/monitoring/metric_def.h | tensorflow/core/lib/monitoring/metric_def_test.cc | #ifndef TENSORFLOW_CORE_LIB_MONITORING_METRIC_DEF_H_
#define TENSORFLOW_CORE_LIB_MONITORING_METRIC_DEF_H_
#include <array>
#include <functional>
#include <string>
#include <vector>
#include "xla/tsl/lib/monitoring/metric_def.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/lib/monitoring/types.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace monitoring {
using tsl::monitoring::MetricDef;
using tsl::monitoring::MetricKind;
using tsl::monitoring::ValueType;
}
}
#endif | #include "tensorflow/core/lib/monitoring/metric_def.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace monitoring {
namespace {
TEST(MetricDefTest, Simple) {
const MetricDef<MetricKind::kCumulative, int64_t, 0> metric_def0(
"/tensorflow/metric0", "An example metric with no labels.");
const MetricDef<MetricKind::kGauge, HistogramProto, 1> metric_def1(
"/tensorflow/metric1", "An example metric with one label.", "LabelName");
EXPECT_EQ("/tensorflow/metric0", metric_def0.name());
EXPECT_EQ("/tensorflow/metric1", metric_def1.name());
EXPECT_EQ(MetricKind::kCumulative, metric_def0.kind());
EXPECT_EQ(MetricKind::kGauge, metric_def1.kind());
EXPECT_EQ("An example metric with no labels.", metric_def0.description());
EXPECT_EQ("An example metric with one label.", metric_def1.description());
EXPECT_EQ(0, metric_def0.label_descriptions().size());
ASSERT_EQ(1, metric_def1.label_descriptions().size());
EXPECT_EQ("LabelName", metric_def1.label_descriptions()[0]);
}
TEST(MetricDefTest, StringsPersist) {
string name = "/tensorflow/metric0";
string description = "test description";
string label_description = "test label description";
const MetricDef<MetricKind::kCumulative, int64_t, 1> metric_def(
name, description, label_description);
name[4] = 'A';
description[4] = 'B';
label_description[4] = 'C';
EXPECT_NE(name, metric_def.name());
EXPECT_NE(description, metric_def.description());
EXPECT_NE(label_description, metric_def.label_descriptions()[0]);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/monitoring/metric_def.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/monitoring/metric_def_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
81478791-4647-44ad-a8fe-a105c9dcf9ae | cpp | tensorflow/tensorflow | heap_simulator | third_party/xla/xla/service/heap_simulator/heap_simulator.cc | third_party/xla/xla/service/heap_simulator/heap_simulator_test.cc | #include "xla/service/heap_simulator/heap_simulator.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <list>
#include <memory>
#include <numeric>
#include <optional>
#include <ostream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/map_util.h"
#include "xla/service/buffer_value.h"
#include "xla/service/heap_simulator/allocation_block.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/logical_buffer.h"
#include "xla/service/time_utils.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
constexpr int64_t kMaxMemoryMapDimensionSize = 100;
struct AsciiMemoryMapParameters {
int64_t memory_block_size = 1;
int64_t end_of_last_occupied_chunk = -1;
};
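// Derives the block size used to scale the ASCII memory map (the GCD of all
// chunk offsets and ends) together with the end of the last occupied chunk.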
AsciiMemoryMapParameters GetAsciiMemoryMapParameters(
std::vector<const BufferIntervalTreeNode*>& nodes) {
CHECK(!nodes.empty());
int64_t min_chunk_offset = std::numeric_limits<int64_t>::max();
int64_t end_of_last_occupied_chunk = -1;
int64_t memory_block_size = nodes.front()->chunk.offset;
for (const BufferIntervalTreeNode* node : nodes) {
min_chunk_offset = std::min(min_chunk_offset, node->chunk.offset);
end_of_last_occupied_chunk =
std::max(end_of_last_occupied_chunk, node->chunk.chunk_end());
memory_block_size = std::gcd(memory_block_size, node->chunk.offset);
memory_block_size = std::gcd(memory_block_size, node->chunk.chunk_end());
}
VLOG(3) << " min_chunk_offset: " << min_chunk_offset
<< " end_of_last_occupied_chunk: " << end_of_last_occupied_chunk
<< " memory_block_size: " << memory_block_size;
return {memory_block_size, end_of_last_occupied_chunk};
}
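// Builds a [memory block][time step] occupancy grid over [start, end] for the
// given interval tree nodes.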
std::vector<std::vector<bool>> GetMemoryMap(
int64_t start, int64_t end, int64_t memory_block_size,
int64_t num_memory_blocks,
std::vector<const BufferIntervalTreeNode*>& nodes) {
int64_t total_time = end - start + 1;
std::vector<std::vector<bool>> memory_map(
num_memory_blocks, std::vector<bool>(total_time, false));
for (const BufferIntervalTreeNode* node : nodes) {
for (int64_t i = node->chunk.offset / memory_block_size;
i < node->chunk.chunk_end() / memory_block_size; ++i) {
for (int64_t j = std::max(node->start - start, int64_t{0});
j <= std::min(node->end - start, end - start); ++j) {
memory_map[i][j] = true;
}
}
}
return memory_map;
}
std::string BufferIntervalTreeNodesToString(
absl::Span<const BufferIntervalTreeNode* const> nodes) {
std::string output;
for (const BufferIntervalTreeNode* node : nodes) {
absl::StrAppend(&output, node->ToString(), "\n");
}
return output;
}
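// Renders the occupancy grid as ASCII art: '#' marks an occupied block, '.' a
// free one; rows are labeled with memory offsets, columns with time mod 10.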
std::string MemoryMapToString(int64_t start, int64_t end,
int64_t memory_block_size, int64_t group_size,
std::vector<std::vector<bool>>& memory_map) {
int64_t num_memory_blocks = memory_map.size();
int64_t total_time = memory_map.front().size();
std::string output = "\n";
absl::StrAppend(&output, "Memory map for time: [", start, ",", end,
"], memory_block_size: ", memory_block_size,
", group_size: ", group_size, "\n\n");
for (int64_t i = num_memory_blocks - 1; i >= 0; --i) {
for (int64_t j = 0; j < total_time; ++j) {
if (group_size && j % group_size == 0) {
absl::StrAppend(&output, " ");
}
absl::StrAppend(&output, memory_map[i][j] ? "#" : ".");
}
absl::StrAppend(&output, " ", std::to_string((i + 1) * memory_block_size),
"\n");
}
for (int64_t j = start; j <= end; ++j) {
if (group_size && j % group_size == 0) {
absl::StrAppend(&output, " ");
}
absl::StrAppend(&output, std::to_string(j % 10));
}
absl::StrAppend(&output, "\n\n");
return output;
}
}
using absl::flat_hash_map;
using absl::flat_hash_set;
bool IsOdd(int x) { return (x % 2) == 1; }
bool IsEven(int x) { return (x % 2) == 0; }
HeapSimulator::Chunk HeapSimulator::Chunk::FromOffsetEnd(int64_t offset,
int64_t end) {
return FromOffsetSize(offset, end - offset);
}
HeapSimulator::Chunk HeapSimulator::Chunk::FromOffsetSize(int64_t offset,
int64_t size) {
return Chunk(offset, size);
}
std::string HeapSimulator::Chunk::ToString() const {
return absl::StrCat("[", offset, ",", chunk_end(), ")");
}
std::string BufferIntervalTreeNode::ToString() const {
return absl::StrCat("start: ", start, " end: ", end,
" chunk: ", chunk.ToString());
}
bool HeapSimulator::Chunk::OverlapsWith(Chunk other_chunk) const {
CHECK_NE(size, 0);
CHECK_NE(other_chunk.size, 0);
return offset < other_chunk.chunk_end() && other_chunk.offset < chunk_end();
}
std::ostream& operator<<(std::ostream& stream,
const HeapSimulator::Chunk& chunk) {
stream << chunk.ToString();
return stream;
}
absl::StatusOr<int64_t> HeapSimulator::MinimumMemoryForModule(
const HloSchedule& schedule,
const LogicalBuffer::SizeFunction& size_function) {
if (schedule.empty()) {
return 0;
}
const HloModule* module = schedule.module();
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module));
TF_ASSIGN_OR_RETURN(
HeapSimulator::Result<HloValue> result,
HeapSimulator::Run(std::make_unique<NoFragmentationStatsHeap<HloValue>>(),
*module, schedule, *alias_analysis, size_function));
return result.heap_size;
}
absl::StatusOr<int64_t> HeapSimulator::MinimumMemoryForComputation(
const HloComputation& computation, const HloInstructionSequence& sequence,
const HloAliasAnalysis& alias_analysis,
const LogicalBuffer::SizeFunction& size_function) {
TF_ASSIGN_OR_RETURN(
HeapSimulator::Result<HloValue> result,
HeapSimulator::Run(std::make_unique<NoFragmentationStatsHeap<HloValue>>(),
computation, sequence, alias_analysis, size_function,
HeapSimulator::Options()));
return result.heap_size;
}
absl::StatusOr<int64_t> HeapSimulator::MinimumMemoryForComputation(
const HloComputation& computation, const HloInstructionSequence& sequence,
const HloAliasAnalysis& alias_analysis,
const LogicalBuffer::SizeFunction& size_function,
const HloSchedule* schedule) {
TF_ASSIGN_OR_RETURN(
HeapSimulator::Result<HloValue> result,
HeapSimulator::Run(std::make_unique<NoFragmentationStatsHeap<HloValue>>(),
computation, sequence, alias_analysis, size_function,
schedule, HeapSimulator::Options()));
return result.heap_size;
}
absl::StatusOr<HeapSimulator::Result<HloValue>> HeapSimulator::Run(
std::unique_ptr<HeapAlgorithm<HloValue>> algorithm, const HloModule& module,
const HloSchedule& schedule, const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_fn, const Options& options) {
HeapSimulator heap(std::move(algorithm), size_fn, options, &schedule);
const HloComputation* entry_computation = module.entry_computation();
const HloInstructionSequence& instruction_sequence =
schedule.sequence(entry_computation);
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloLiveRange> hlo_live_range,
HloLiveRange::Run(schedule, alias_analysis, entry_computation));
TF_RETURN_IF_ERROR(heap.RunComputation(*entry_computation,
instruction_sequence, alias_analysis,
hlo_live_range.get()));
return heap.Finish();
}
absl::StatusOr<HeapSimulator::Result<HloValue>> HeapSimulator::Run(
std::unique_ptr<HeapAlgorithm<HloValue>> algorithm,
const HloComputation& computation,
const HloInstructionSequence& instruction_sequence,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_fn, const Options& options) {
HeapSimulator heap(std::move(algorithm), size_fn, options,
nullptr);
HloSchedule schedule(computation.parent());
schedule.set_sequence(&computation, instruction_sequence);
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloLiveRange> hlo_live_range,
HloLiveRange::Run(schedule, alias_analysis, &computation,
false));
TF_RETURN_IF_ERROR(heap.RunComputation(computation, instruction_sequence,
alias_analysis, hlo_live_range.get()));
return heap.Finish();
}
absl::StatusOr<HeapSimulator::Result<HloValue>> HeapSimulator::Run(
std::unique_ptr<HeapAlgorithm<HloValue>> algorithm,
const HloComputation& computation,
const HloInstructionSequence& instruction_sequence,
const HloAliasAnalysis& alias_analysis,
const BufferValue::SizeFunction& size_fn, const HloSchedule* schedule,
const Options& options) {
HeapSimulator heap(std::move(algorithm), size_fn, options,
schedule);
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloLiveRange> hlo_live_range,
HloLiveRange::Run(*schedule, alias_analysis, &computation));
TF_RETURN_IF_ERROR(heap.RunComputation(computation, instruction_sequence,
alias_analysis, hlo_live_range.get()));
return heap.Finish();
}
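// Core simulation loop: buckets values by the time steps at which they are
// defined and freed, then replays the schedule, sharing a dying operand's
// buffer with its user whenever the dataflow analysis allows it.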
absl::Status HeapSimulator::RunComputation(
const HloComputation& computation,
const HloInstructionSequence& instruction_sequence,
const HloAliasAnalysis& alias_analysis, HloLiveRange* hlo_live_range) {
XLA_VLOG_LINES(1, computation.parent()->ToString());
XLA_VLOG_LINES(2, computation.ToString());
VLOG(1) << hlo_live_range->ToString();
HloDataflowAnalysis& dataflow_analysis = alias_analysis.dataflow_analysis();
std::vector<std::vector<const HloValue*>> buffers_defined(
hlo_live_range->schedule_end_time() + 1);
std::vector<std::vector<const HloValue*>> buffers_freed(
hlo_live_range->schedule_end_time() + 1);
std::vector<const HloValue*> values_to_assign;
values_to_assign.reserve(dataflow_analysis.values().size());
auto& buffer_live_ranges = hlo_live_range->buffer_live_ranges();
for (const HloValue* value : dataflow_analysis.values()) {
if (!buffer_live_ranges.contains(value)) {
continue;
}
if (IgnoreBuffer(value)) {
continue;
}
values_to_assign.push_back(value);
}
absl::c_sort(values_to_assign,
[&](const HloValue* value1, const HloValue* value2) {
const auto& live_range1 = buffer_live_ranges.at(value1);
const auto& live_range2 = buffer_live_ranges.at(value2);
return std::forward_as_tuple(live_range1.start,
live_range1.end, value1->id()) <
std::forward_as_tuple(live_range2.start,
live_range2.end, value2->id());
});
for (const HloValue* value : values_to_assign) {
auto live_range = buffer_live_ranges.at(value);
buffers_defined[live_range.start].push_back(value);
buffers_freed[live_range.end].push_back(value);
}
absl::flat_hash_map<const HloBuffer*, const HloValue*> first_allocated_value;
VLOG(1) << "Program time" << hlo_live_range->schedule_end_time();
for (const HloBuffer& buffer : alias_analysis.buffers()) {
int64_t size = 0;
for (const HloValue* value : buffer.values()) {
size = std::max(size, size_fn_(*value));
}
for (const HloValue* value : buffer.values()) {
buffer_sizes_[value] = size;
}
}
for (int64_t i = 0; i < hlo_live_range->schedule_end_time() + 1; ++i) {
VLOG(1) << "Time step: " << i;
for (const HloValue* value : buffers_defined[i]) {
bool shared = false;
VLOG(1) << "Start buffer: " << value->ToShortString();
const HloBuffer* hlo_buffer =
&alias_analysis.GetBufferContainingValue(*value);
if (first_allocated_value.count(hlo_buffer) != 0) {
ShareBuffer(value, first_allocated_value[hlo_buffer],
value->instruction());
VLOG(1) << " ShareWith"
<< first_allocated_value[hlo_buffer]->ToShortString();
continue;
}
if (options_.may_reuse_operand_buffers &&
hlo_buffer->values().size() == 1) {
for (const HloInstruction* operand : value->instruction()->operands()) {
const HloValueSet operand_value_set =
dataflow_analysis.GetValueSet(operand);
for (const HloValue* operand_value : operand_value_set.values()) {
const HloBuffer* operand_buffer =
&alias_analysis.GetBufferContainingValue(*operand_value);
if (operand_buffer->values().size() > 1) {
continue;
}
auto it = buffer_live_ranges.find(operand_value);
if (it == buffer_live_ranges.end()) {
continue;
}
auto& operand_live_range = it->second;
auto& user_live_range = buffer_live_ranges[value];
if (operand_live_range.end != i) {
continue;
}
if (IgnoreBuffer(operand_value)) {
continue;
}
if (!absl::c_linear_search(buffers_freed[i], operand_value)) {
continue;
}
if (value->instruction()->IsUserOf(operand_value->instruction()) &&
value->instruction()->opcode() != HloOpcode::kCopy &&
dataflow_analysis.CanShareOperandBufferWithUser(
operand_value->instruction(), operand_value->index(),
value->instruction(), value->index())) {
Free(operand_value, operand_value->instruction());
buffers_freed[i].erase(
std::remove(buffers_freed[i].begin(), buffers_freed[i].end(),
operand_value),
buffers_freed[i].end());
ShareBuffer(value, operand_value, value->instruction());
operand_live_range.end = user_live_range.end;
VLOG(1) << "Sharing " << value->ToShortString() << " with "
<< operand_value->ToShortString()
<< ", size:" << size_fn_(*value);
shared = true;
break;
}
}
if (shared) {
break;
}
}
}
if (!shared) {
Alloc(value, value->instruction());
first_allocated_value[hlo_buffer] = value;
}
}
if (!buffers_freed[i].empty()) {
VLOG(1) << "Free Buffer: ";
}
for (const HloValue* value : buffers_freed[i]) {
VLOG(1) << " " << value->ToShortString();
Free(value, value->instruction());
}
}
return absl::OkStatus();
}
HeapSimulator::HeapSimulator(std::unique_ptr<HeapAlgorithm<HloValue>> algorithm,
const BufferValue::SizeFunction& size_fn,
const Options& options,
const HloSchedule* schedule)
: no_fragmentation_stats_(
std::make_unique<NoFragmentationStatsHeap<HloValue>>()),
algorithm_(std::move(algorithm)),
size_fn_(size_fn),
options_(options),
schedule_(schedule) {
debug_trace_.set_whole_module_simulation(schedule_ != nullptr);
}
HeapSimulator::~HeapSimulator() {}
bool HeapSimulator::IgnoreBuffer(const HloValue* buffer) const {
if (!options_.alloc_constants &&
buffer->instruction()->opcode() == HloOpcode::kConstant) {
return true;
}
return options_.buffers_to_assign != nullptr &&
!options_.buffers_to_assign->contains(buffer);
}
void HeapSimulator::Alloc(const HloValue* buffer,
const HloInstruction* instruction) {
CHECK(!allocated_buffers_.contains(buffer))
<< "Alloc called on allocated buffer: " << *buffer;
CHECK(!freed_buffers_.contains(buffer))
<< "Alloc called on freed buffer: " << *buffer;
allocated_buffers_.insert(buffer);
const int64_t size = GetBufferSize(buffer);
algorithm_->Alloc(buffer, size);
no_fragmentation_stats_->Alloc(buffer, size);
FillDebugTrace(HeapSimulatorTrace::Event::ALLOC, buffer, instruction,
nullptr);
}
void HeapSimulator::Free(const HloValue* buffer,
const HloInstruction* instruction) {
const int64_t size = GetBufferSize(buffer);
algorithm_->Free(buffer, size);
no_fragmentation_stats_->Free(buffer, size);
FillDebugTrace(HeapSimulatorTrace::Event::FREE, buffer, instruction, nullptr);
}
void HeapSimulator::ShareBuffer(const HloValue* buffer, const HloValue* shared,
const HloInstruction* instruction) {
algorithm_->ShareWith(buffer, shared, GetBufferSize(shared));
no_fragmentation_stats_->ShareWith(buffer, shared, GetBufferSize(shared));
FillDebugTrace(HeapSimulatorTrace::Event::SHARE_WITH, buffer, instruction,
shared);
}
int64_t HeapSimulator::GetBufferSize(const HloValue* buffer) const {
auto it = buffer_sizes_.find(buffer);
CHECK(it != buffer_sizes_.end());
return it->second;
}
absl::StatusOr<HeapSimulator::Result<HloValue>> HeapSimulator::Finish() {
TF_ASSIGN_OR_RETURN(Result<HloValue> result, algorithm_->Finish());
size_t total_chunk_count = absl::c_accumulate(
result.heap_results, static_cast<size_t>(0),
[&](size_t lhs, const HeapResult<HloValue>& rhs) -> size_t {
return lhs + rhs.chunk_map.size();
});
if (total_chunk_count != 0) {
if (options_.buffers_to_assign != nullptr) {
CHECK_EQ(options_.buffers_to_assign->size(), total_chunk_count);
}
}
TF_ASSIGN_OR_RETURN(const Result<HloValue> no_frag_result,
no_fragmentation_stats_->Finish());
result.fragmentation_size = result.heap_size - no_frag_result.heap_size;
result.debug_trace.Swap(&debug_trace_);
return result;
}
void HeapSimulator::FillDebugTrace(HeapSimulatorTrace::Event::Kind kind,
const HloValue* buffer,
const HloInstruction* instruction,
const HloValue* share_with_canonical) {
HeapSimulatorTrace::Event* event = debug_trace_.add_events();
event->set_kind(kind);
event->set_buffer_id(buffer->id());
*event->mutable_computation_name() =
std::string(instruction->parent()->name());
*event->mutable_instruction_name() = std::string(instruction->name());
if (kind == HeapSimulatorTrace::Event::SHARE_WITH) {
CHECK(share_with_canonical != nullptr);
event->set_share_with_canonical_id(share_with_canonical->id());
} else {
CHECK(share_with_canonical == nullptr);
}
}
template <typename BufferType>
void NoFragmentationStatsHeap<BufferType>::Alloc(const BufferType* buffer,
int64_t size) {
current_heap_size_ += size;
if (current_heap_size_ > max_heap_size_) {
max_heap_size_ = current_heap_size_;
}
}
template <typename BufferType>
void NoFragmentationStatsHeap<BufferType>::AccountForSubcomputationMemory(
const HloInstruction* instruction, int64_t alloc_size_by_instruction) {
int64_t max_subcomputation_bytes = 0;
if (max_subcomputation_bytes > 0 &&
(instruction->opcode() == HloOpcode::kWhile ||
instruction->opcode() == HloOpcode::kCall ||
instruction->opcode() == HloOpcode::kConditional)) {
max_subcomputation_bytes -= alloc_size_by_instruction;
}
max_heap_size_ =
std::max(max_heap_size_, current_heap_size_ + max_subcomputation_bytes);
}
template <typename BufferType>
void NoFragmentationStatsHeap<BufferType>::Free(const BufferType* buffer,
int64_t size) {
current_heap_size_ -= size;
}
template <typename BufferType>
absl::StatusOr<HeapSimulator::Result<BufferType>>
NoFragmentationStatsHeap<BufferType>::Finish() {
Result result;
result.heap_size = max_heap_size_;
return result;
}
template <typename BufferType>
GlobalDecreasingSizeBestFitHeap<BufferType>::GlobalDecreasingSizeBestFitHeap(
int64_t alignment, Type type, BufferIntervalCompare buffer_interval_compare,
SliceTimePermutationIterator::Ty slice_time_permutation_iterator_type)
: alignment_(alignment),
slice_time_permutation_iteration_type_(
slice_time_permutation_iterator_type) {
if (type == kTemporal) {
buffer_interval_compare_ = GetTemporalBufferIntervalCompare();
CHECK(buffer_interval_compare == nullptr);
} else if (type == kSpatial) {
buffer_interval_compare_ = GetSpatialBufferIntervalCompare();
CHECK(buffer_interval_compare == nullptr);
} else {
CHECK(type == kCustom);
CHECK(buffer_interval_compare != nullptr);
buffer_interval_compare_ = buffer_interval_compare;
}
}
template <typename BufferType>
typename GlobalDecreasingSizeBestFitHeap<BufferType>::BufferIntervalCompare
GlobalDecreasingSizeBestFitHeap<BufferType>::GetTemporalBufferIntervalCompare()
const {
return LessThanByKey([this](const BufferInterval& x) {
int64_t x_end = x.end;
for (auto colocation : GetTransitiveColocations(x)) {
x_end = std::max(x_end, buffer_intervals_.at(colocation).end);
}
return std::make_tuple(x.start - x_end, -x.size, std::cref(*x.buffer));
});
}
template <typename BufferType>
SliceTimePermutationIterator::Ty GlobalDecreasingSizeBestFitHeap<
BufferType>::slice_time_permutation_iterator_type() const {
return slice_time_permutation_iteration_type_;
}
template <typename BufferType>
typename GlobalDecreasingSizeBestFitHeap<
BufferType>::BufferIntervalCompare
GlobalDecreasingSizeBestFitHeap<BufferType>::GetSpatialBufferIntervalCompare() {
return LessThanByKey([](const BufferInterval& x) {
return std::make_tuple(-x.size, x.start - x.end, std::cref(*x.buffer));
});
}
template <typename BufferType>
void GlobalDecreasingSizeBestFitHeap<BufferType>::Alloc(
const BufferType* buffer, int64_t size) {
if (size == 0) {
result_.chunk_map.emplace(buffer, Chunk::FromOffsetSize(0, 0));
return;
}
auto emplace_result = buffer_intervals_.emplace(
buffer, BufferInterval{buffer, size, current_time_, -1, {}, true});
CHECK(emplace_result.second);
++current_time_;
}
template <typename BufferType>
void GlobalDecreasingSizeBestFitHeap<BufferType>::ShareWith(
const BufferType* buffer, const BufferType* share_with, int64_t size) {
if (size == 0) {
result_.chunk_map.emplace(buffer, Chunk::FromOffsetSize(0, 0));
return;
}
CHECK_NE(buffer_intervals_.count(share_with), 0);
buffer_intervals_[share_with].colocations.push_back(buffer);
auto emplace_result = buffer_intervals_.emplace(
buffer, BufferInterval{buffer, size, current_time_, -1, {}, false});
CHECK(emplace_result.second);
++current_time_;
}
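// Collects every buffer reachable from `interval` through colocation edges,
// using an explicit worklist.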
template <typename BufferType>
absl::flat_hash_set<const BufferType*>
GlobalDecreasingSizeBestFitHeap<BufferType>::GetTransitiveColocations(
const BufferInterval& interval) const {
absl::flat_hash_set<const BufferType*> result;
std::vector<const BufferInterval*> worklist = {&interval};
while (!worklist.empty()) {
const BufferInterval* item = worklist.back();
worklist.pop_back();
for (const BufferType* buffer_colocated : item->colocations) {
if (result.insert(buffer_colocated).second) {
worklist.push_back(&buffer_intervals_.at(buffer_colocated));
}
}
}
return result;
}
template <typename BufferType>
void GlobalDecreasingSizeBestFitHeap<BufferType>::Free(const BufferType* buffer,
int64_t size) {
if (size == 0) {
return;
}
BufferInterval& buffer_interval = FindOrDie(buffer_intervals_, buffer);
CHECK_EQ(buffer_interval.buffer, buffer);
CHECK_EQ(buffer_interval.size, size);
CHECK_EQ(buffer_interval.end, -1);
if (buffer_interval.end != -1) {
return;
}
buffer_interval.end = current_time_;
++current_time_;
}
using Chunk = HeapSimulator::Chunk;
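// Inserts an interval into the tree (a binary search tree keyed on start
// time), updating the subtree_end augmentation along the insertion path.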
void BufferIntervalTree::Add(int64_t start, int64_t end, const Chunk& chunk) {
node_storage_.emplace_back(BufferIntervalTreeNode{
start, end, end, chunk,
nullptr, nullptr, nullptr});
if (root_ == nullptr) {
root_ = &node_storage_.back();
return;
}
BufferIntervalTreeNode* parent = root_;
while (true) {
parent->subtree_end = std::max(parent->subtree_end, end);
if (parent->start > start) {
if (parent->left == nullptr) {
parent->left = &node_storage_.back();
node_storage_.back().parent = parent;
return;
}
parent = parent->left;
} else {
if (parent->right == nullptr) {
parent->right = &node_storage_.back();
node_storage_.back().parent = parent;
return;
}
parent = parent->right;
}
}
}
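// Removes the node matching (start, end, chunk offset) using standard BST
// deletion with in-order-successor promotion, then repairs subtree_end along
// the ancestor chain; returns false if no matching node exists.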
bool BufferIntervalTree::Remove(int64_t start, int64_t end,
const Chunk& chunk) {
BufferIntervalTreeNode* to_delete = root_;
while (to_delete != nullptr) {
if (to_delete->start == start && to_delete->end == end &&
to_delete->chunk.offset == chunk.offset) {
break;
}
if (start < to_delete->start) {
to_delete = to_delete->left;
} else {
to_delete = to_delete->right;
}
}
if (to_delete == nullptr) {
return false;
}
std::function<void(BufferIntervalTreeNode*)> fix_up =
[&](BufferIntervalTreeNode* node) {
if (node == nullptr) {
return;
}
node->subtree_end = node->end;
if (node->left) {
node->subtree_end =
std::max(node->subtree_end, node->left->subtree_end);
}
if (node->right) {
node->subtree_end =
std::max(node->subtree_end, node->right->subtree_end);
}
fix_up(node->parent);
};
if (to_delete->right == nullptr) {
if (root_ == to_delete) {
root_ = to_delete->left;
return true;
}
if (to_delete == to_delete->parent->left) {
to_delete->parent->left = to_delete->left;
}
if (to_delete == to_delete->parent->right) {
to_delete->parent->right = to_delete->left;
}
if (to_delete->left) {
to_delete->left->parent = to_delete->parent;
}
fix_up(to_delete);
} else {
BufferIntervalTreeNode* to_promote = to_delete->right;
while (to_promote->left != nullptr) {
to_promote = to_promote->left;
}
to_delete->start = to_promote->start;
to_delete->end = to_promote->end;
to_delete->subtree_end = to_promote->subtree_end;
to_delete->chunk = to_promote->chunk;
auto to_promote_parent = to_promote->parent;
if (to_promote_parent->left == to_promote) {
to_promote_parent->left = to_promote->right;
} else {
to_promote_parent->right = to_promote->right;
}
if (to_promote->right) {
to_promote->right->parent = to_promote_parent;
}
fix_up(to_promote_parent);
}
return true;
}
std::vector<Chunk> BufferIntervalTree::ChunksOverlappingInTime(
int64_t start, int64_t end) const {
std::vector<Chunk> result;
for (const BufferIntervalTreeNode* node :
NodesOverlappingInTime(start, end)) {
result.push_back(node->chunk);
}
return result;
}
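// Iterative pre-order search over the interval tree. A subtree is skipped
// when `start` lies beyond its `subtree_end`; the right subtree is skipped
// when `end` precedes the node's start, since starts only grow to the right.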
std::vector<const BufferIntervalTreeNode*>
BufferIntervalTree::NodesOverlappingInTime(int64_t start, int64_t end) const {
std::vector<const BufferIntervalTreeNode*> result;
if (root_ == nullptr) {
return result;
}
std::vector<const BufferIntervalTreeNode*> visiting_stack;
visiting_stack.push_back(root_);
while (!visiting_stack.empty()) {
const BufferIntervalTreeNode* top = visiting_stack.back();
visiting_stack.pop_back();
if (start > top->subtree_end) {
continue;
}
if (top->left != nullptr) {
visiting_stack.push_back(top->left);
}
if (top->start <= end && top->end >= start) {
result.push_back(top);
}
if (end < top->start) {
continue;
}
if (top->right != nullptr) {
visiting_stack.push_back(top->right);
}
}
return result;
}
std::string BufferIntervalTree::NodesOverlappingInTimeToAsciiArt(
int64_t start, int64_t end, int64_t group_size) const {
std::vector<const BufferIntervalTreeNode*> nodes =
NodesOverlappingInTime(start, end);
if (nodes.empty()) {
return "No nodes overlapping in time. Memory is free!";
}
auto [memory_block_size, end_of_last_occupied_chunk] =
GetAsciiMemoryMapParameters(nodes);
CHECK_GE(end_of_last_occupied_chunk, 0);
CHECK_NE(memory_block_size, 0);
int64_t total_time = end - start + 1;
int64_t num_memory_blocks = end_of_last_occupied_chunk / memory_block_size;
if (total_time > kMaxMemoryMapDimensionSize ||
num_memory_blocks > kMaxMemoryMapDimensionSize) {
std::string output;
absl::StrAppend(
&output,
"\nCannot print memory usage to ASCII art. Printing nodes instead!\n\n",
BufferIntervalTreeNodesToString(nodes));
return output;
}
std::vector<std::vector<bool>> memory_map =
GetMemoryMap(start, end, memory_block_size, num_memory_blocks, nodes);
return MemoryMapToString(start, end, memory_block_size, group_size,
memory_map);
}
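// Sums the live chunk sizes into one entry per time step of [start, end],
// clamping each overlapping node's lifetime to the queried window.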
std::vector<int64_t> BufferIntervalTree::MemoryUsedInInterval(
int64_t start, int64_t end) const {
int64_t total_time = end - start + 1;
CHECK_GE(total_time, 0);
std::vector<const BufferIntervalTreeNode*> nodes =
NodesOverlappingInTime(start, end);
std::vector<int64_t> memory_used_in_interval(total_time, 0);
for (const BufferIntervalTreeNode* node : nodes) {
int64_t node_start = std::max(node->start, start);
int64_t node_end = std::min(node->end, end);
for (int64_t time = node_start; time <= node_end; ++time) {
memory_used_in_interval[time - start] += node->chunk.size;
}
}
return memory_used_in_interval;
}
int64_t BufferIntervalTree::HeapSizeInInterval(const int64_t start,
const int64_t end) const {
CHECK_LE(start, end);
std::vector<const BufferIntervalTreeNode*> nodes =
NodesOverlappingInTime(start, end);
int64_t max_memory_used = 0;
for (const BufferIntervalTreeNode* node : nodes) {
max_memory_used = std::max(max_memory_used, node->chunk.chunk_end());
}
return max_memory_used;
}
template <typename BufferType>
std::string
GlobalDecreasingSizeBestFitHeap<BufferType>::BufferInterval::ToString() const {
return absl::StrCat("{ ",
"buffer: {", (buffer ? buffer->ToString() : "null"),
"}, ",
"size: ", size, ", ",
"start: ", start, ", ",
"end: ", end, ", ",
"num_colocations: ", colocations.size(), ", ",
"need_allocation: ", need_allocation,
" }");
}
template <typename BufferType>
const
typename GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedBufferInterval
GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedBufferInterval::
CreateConstInterval(const BufferInterval& full_buffer_interval) {
return SlicedBufferInterval(full_buffer_interval);
}
template <typename BufferType>
typename GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedBufferInterval
GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedBufferInterval::
CreateMutableInterval(BufferInterval& full_buffer_interval) {
return SlicedBufferInterval(full_buffer_interval, &full_buffer_interval);
}
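// Splits the buffer into slices sorted by offset and builds one interval per
// slice for MakeFreeChunks: every slice except the last uses the minimum
// slice size, while the last carries the full size plus the colocations.
// The slice sizes must add up to the full buffer size.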
template <typename BufferType>
void GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedBufferInterval::Slice(
absl::Span<const int64_t> slice_sizes_sorted_by_offset) {
if (slice_sizes_sorted_by_offset.empty()) {
slice_sizes_sorted_by_offset_ = {full_buffer_interval_.size};
make_free_chunks_intervals_ = {full_buffer_interval_};
return;
}
const int64_t min_slice_size =
*absl::c_min_element(slice_sizes_sorted_by_offset);
slice_sizes_sorted_by_offset_ = std::vector<int64_t>(
slice_sizes_sorted_by_offset.begin(), slice_sizes_sorted_by_offset.end());
size_t num_slices = slice_sizes_sorted_by_offset.size();
make_free_chunks_intervals_.clear();
make_free_chunks_intervals_.reserve(num_slices);
int64_t size_total = 0;
absl::InlinedVector<const BufferType*, 2> empty_colocations;
for (int i = 0; i < num_slices; ++i) {
int64_t new_size = slice_sizes_sorted_by_offset[i];
size_total += new_size;
make_free_chunks_intervals_.push_back(BufferInterval{
full_buffer_interval_.buffer,
(i == num_slices - 1 ? full_buffer_interval_.size : min_slice_size),
0,
full_buffer_interval_.end,
(i == num_slices - 1 ? full_buffer_interval_.colocations
: empty_colocations),
full_buffer_interval_.need_allocation});
}
CHECK_EQ(size_total, full_buffer_interval_.size)
<< " slice sizes: {" << absl::StrJoin(slice_sizes_sorted_by_offset, ", ")
<< "};";
}
template <typename BufferType>
void GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedBufferInterval::
UpdateExclusiveSliceStartTimes(
const std::vector<int64_t>& exclusive_start_times) {
std::vector<int64_t> inclusive_start_times = exclusive_start_times;
absl::c_for_each(inclusive_start_times,
[](int64_t& t) { t = ExclusiveToInclusiveStartTime(t); });
UpdateInclusiveSliceStartTimes(inclusive_start_times);
}
template <typename BufferType>
void GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedBufferInterval::
UpdateInclusiveSliceStartTimes(
const std::vector<int64_t>& inclusive_start_times) {
CHECK_EQ(inclusive_start_times.size(), num_slices());
CHECK(mutable_full_buffer_interval_ != nullptr);
mutable_full_buffer_interval_->start = inclusive_start_times.front();
for (size_t slice_time = 0; slice_time < num_slices(); ++slice_time) {
make_free_chunks_intervals_[slice_time].start =
inclusive_start_times[slice_time];
if (slice_time != num_slices() - 1) {
make_free_chunks_intervals_[slice_time].end =
ExclusiveToInclusiveEndTime(inclusive_start_times[slice_time + 1]);
} else {
make_free_chunks_intervals_[slice_time].end = full_buffer_interval_.end;
}
}
}
template <typename BufferType>
void GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedBufferInterval::UpdateEndTime(int64_t end_time) {
CHECK(mutable_full_buffer_interval_ != nullptr);
mutable_full_buffer_interval_->end = end_time;
make_free_chunks_intervals_.back().end = end_time;
}
template <typename BufferType>
const typename GlobalDecreasingSizeBestFitHeap<BufferType>::BufferInterval&
GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedBufferInterval::full_buffer_interval() const {
return full_buffer_interval_;
}
template <typename BufferType>
const std::vector<int64_t>& GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedBufferInterval::SliceSizesSortedByOffset() const {
return slice_sizes_sorted_by_offset_;
}
template <typename BufferType>
std::vector<int64_t> GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedBufferInterval::inclusive_start_times() const {
std::vector<int64_t> inclusive_start_times;
inclusive_start_times.reserve(num_slices());
for (const BufferInterval& buffer_interval : make_free_chunks_intervals_) {
inclusive_start_times.push_back(buffer_interval.start);
}
return inclusive_start_times;
}
template <typename BufferType>
const typename GlobalDecreasingSizeBestFitHeap<BufferType>::BufferInterval&
GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedBufferInterval::
IntervalForMakeFreeChunks(int64_t slice_time) const {
CHECK_LT(slice_time, num_slices());
return make_free_chunks_intervals_[slice_time];
}
template <typename BufferType>
GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedBufferInterval::
SlicedBufferInterval(const BufferInterval& full_buffer_interval,
BufferInterval* mutable_full_buffer_interval)
: full_buffer_interval_(full_buffer_interval),
mutable_full_buffer_interval_(mutable_full_buffer_interval) {
Slice({});
}
template <typename BufferType>
std::string GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedBufferInterval::ToString() const {
return absl::StrCat(
"{ full_buffer_interval: ", full_buffer_interval_.ToString(), ", ",
"MakeFreeChunks intervals: { ",
absl::StrJoin(make_free_chunks_intervals_, ", ",
[](std::string* out, const BufferInterval& interval) {
absl::StrAppend(out, interval.ToString());
}),
" }, ", "slize_sizes_sorted_by_offsets: { ",
absl::StrJoin(slice_sizes_sorted_by_offset_, ", "), " } }");
}
namespace {
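// Accepts a slice-time permutation only if it preserves the original pairing
// of slice sizes with inclusive start times from a previously sliced
// allocation, i.e. slices of equal size may swap start times, but a slice may
// not take a start time that belonged to a differently sized slice.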
class SliceTimePermutationValidator {
public:
explicit SliceTimePermutationValidator(
const SlicedAllocationData* original_slices)
: original_num_slices_(original_slices ? original_slices->num_slices()
: 0) {
if (original_num_slices_ <= 0) {
return;
}
slice_time_to_inclusive_schedule_time_ =
original_slices->SortedInclusiveStartTimes();
absl::c_sort(slice_time_to_inclusive_schedule_time_);
original_slice_sizes_and_start_times_pairwise_sorted_.reserve(
original_num_slices_);
for (const AllocatedSlice& slice :
original_slices->slices_sorted_by_offset) {
original_slice_sizes_and_start_times_pairwise_sorted_.push_back(
std::make_pair(slice.size, slice.inclusive_start_time));
}
absl::c_sort(original_slice_sizes_and_start_times_pairwise_sorted_);
sizes_sorted_by_offset_ = original_slices->SizesSortedByOffset();
}
bool IsValid(absl::Span<const int64_t> permutation) {
if (original_num_slices_ <= 0) {
return true;
}
std::vector<std::pair<int64_t, int64_t>>
proposed_slice_sizes_and_start_times_pairwise_sorted;
proposed_slice_sizes_and_start_times_pairwise_sorted.reserve(
original_num_slices_);
CHECK_EQ(sizes_sorted_by_offset_.size(), original_num_slices_);
CHECK_EQ(permutation.size(), original_num_slices_);
for (int i = 0; i < original_num_slices_; ++i) {
proposed_slice_sizes_and_start_times_pairwise_sorted.push_back(
std::make_pair(
sizes_sorted_by_offset_[i],
slice_time_to_inclusive_schedule_time_[permutation[i]]));
}
absl::c_sort(proposed_slice_sizes_and_start_times_pairwise_sorted);
bool allowed = (original_slice_sizes_and_start_times_pairwise_sorted_ ==
proposed_slice_sizes_and_start_times_pairwise_sorted);
VLOG(3) << [&]() {
auto export_pair = [](std::string* out,
const std::pair<int64_t, int64_t>& p) {
absl::StrAppend(out, "<", p.first, ", ", p.second, ">");
};
return absl::StrCat(
"Slice permutation ", (allowed ? "allowed" : "disallowed"),
". Original slice <size, start_time> mapping: ",
absl::StrJoin(original_slice_sizes_and_start_times_pairwise_sorted_,
", ", export_pair),
". Proposed mapping: ",
absl::StrJoin(proposed_slice_sizes_and_start_times_pairwise_sorted,
", ", export_pair),
".");
}();
return allowed;
}
private:
int64_t original_num_slices_;
std::vector<int64_t> slice_time_to_inclusive_schedule_time_;
std::vector<std::pair<int64_t, int64_t>>
original_slice_sizes_and_start_times_pairwise_sorted_;
std::vector<int64_t> sizes_sorted_by_offset_;
};
class ObservedPermutationManager {
public:
explicit ObservedPermutationManager(
absl::Span<const int64_t> inclusive_start_times) {
slice_time_to_inclusive_start_time_ = std::vector<int64_t>(
inclusive_start_times.begin(), inclusive_start_times.end());
absl::c_sort(slice_time_to_inclusive_start_time_);
}
bool Insert(absl::Span<const int64_t> permutation) {
std::vector<int64_t> permutation_inclusive_start_times;
permutation_inclusive_start_times.reserve(permutation.size());
for (int64_t slice_time : permutation) {
permutation_inclusive_start_times.push_back(
slice_time_to_inclusive_start_time_[slice_time]);
}
return observed_inclusive_start_time_permutation_
.insert(permutation_inclusive_start_times)
.second;
}
void Clear() { observed_inclusive_start_time_permutation_.clear(); }
protected:
std::vector<int64_t> slice_time_to_inclusive_start_time_;
absl::flat_hash_set<std::vector<int64_t>>
observed_inclusive_start_time_permutation_;
};
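// Enumerates all permutations of slice times, starting from the identity
// ordering and advancing with absl::c_next_permutation.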
class SliceTimeAllPermutationIterator : public SliceTimePermutationIterator {
public:
explicit SliceTimeAllPermutationIterator(int64_t num_slices)
: num_slices_(num_slices), permutation_(num_slices, 0) {}
~SliceTimeAllPermutationIterator() override = default;
void Begin() override {
done_ = (num_slices_ <= 0);
for (int64_t i = 0; i < num_slices_; ++i) {
permutation_[i] = i;
}
}
bool Done() const override { return done_; }
void Next() override {
if (Done()) {
return;
}
done_ = !absl::c_next_permutation(permutation_);
}
absl::Span<const int64_t> Get() const override { return permutation_; }
private:
SliceTimeAllPermutationIterator() = default;
int64_t num_slices_;
bool done_ = true;
std::vector<int64_t> permutation_;
};
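// Generates only a small set of heuristic permutations: smaller offsets with
// smaller slice times, smaller offsets with larger slice times, and (for
// three or more slices) small slice times spread around the middle offset.
// Slices whose size differs from the first slice keep their original slice
// time and are never permuted.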
class SliceTimePreferredPermutationIterator
: public SliceTimePermutationIterator {
public:
SliceTimePreferredPermutationIterator(
int64_t num_slices,
const SlicedAllocationData* original_sliced_allocation)
: num_slices_(num_slices),
fixed_permutation_values_(num_slices, false),
permutation_(num_slices, 0) {
if (!original_sliced_allocation) {
slice_times_available_for_permutation_.reserve(num_slices_);
for (int64_t slice_time = 0; slice_time < num_slices_; ++slice_time) {
slice_times_available_for_permutation_.push_back(slice_time);
}
return;
}
absl::flat_hash_map<const AllocatedSlice*, int64_t>
slice_to_slice_time_map =
BuildSliceToSliceTimeMap(original_sliced_allocation);
const AllocatedSlice* first_slice = nullptr;
if (!original_sliced_allocation->slices_sorted_by_offset.empty()) {
first_slice =
&original_sliced_allocation->slices_sorted_by_offset.front();
}
for (int offset_index = 0; offset_index < num_slices_; ++offset_index) {
CHECK(first_slice);
const AllocatedSlice& slice =
original_sliced_allocation->slices_sorted_by_offset[offset_index];
if (slice.size != first_slice->size) {
fixed_permutation_values_[offset_index] = true;
permutation_[offset_index] = slice_to_slice_time_map[&slice];
continue;
}
slice_times_available_for_permutation_.push_back(
slice_to_slice_time_map[&slice]);
}
absl::c_sort(slice_times_available_for_permutation_);
}
~SliceTimePreferredPermutationIterator() override = default;
void Begin() override {
permutation_type_ = NextPermutationType(PermutationType::kUninitialized);
SetUpPermutationForCurrentType();
}
bool Done() const override {
return permutation_type_ == PermutationType::kDone;
}
void Next() override {
permutation_type_ = NextPermutationType(permutation_type_);
SetUpPermutationForCurrentType();
}
absl::Span<const int64_t> Get() const override { return permutation_; }
private:
enum class PermutationType {
kUninitialized,
kSmallerOffsetSmallerSliceTime,
kSmallerOffsetLargerSliceTime,
kDistributeSmallSliceTimesAroundMiddleOffset,
kDone,
};
SliceTimePreferredPermutationIterator() = default;
PermutationType NextPermutationType(PermutationType ty) {
switch (ty) {
case PermutationType::kUninitialized:
if (num_slices_ <= 0) {
return PermutationType::kDone;
}
return PermutationType::kSmallerOffsetSmallerSliceTime;
case PermutationType::kSmallerOffsetSmallerSliceTime:
if (num_slices_ <= 1) {
return PermutationType::kDone;
}
return PermutationType::kSmallerOffsetLargerSliceTime;
case PermutationType::kSmallerOffsetLargerSliceTime:
if (num_slices_ <= 2) {
return PermutationType::kDone;
}
return PermutationType::kDistributeSmallSliceTimesAroundMiddleOffset;
case PermutationType::kDistributeSmallSliceTimesAroundMiddleOffset:
case PermutationType::kDone:
return PermutationType::kDone;
}
}
absl::flat_hash_map<const AllocatedSlice*, int64_t> BuildSliceToSliceTimeMap(
const SlicedAllocationData* original_sliced_allocation) {
CHECK(original_sliced_allocation);
std::vector<const AllocatedSlice*> slice_time_to_slice;
slice_time_to_slice.reserve(num_slices_);
for (const AllocatedSlice& slice :
original_sliced_allocation->slices_sorted_by_offset) {
slice_time_to_slice.push_back(&slice);
}
absl::c_sort(slice_time_to_slice, [](const AllocatedSlice* lhs,
const AllocatedSlice* rhs) {
return std::make_tuple(lhs->inclusive_start_time, lhs->offset) <
std::make_tuple(rhs->inclusive_start_time, rhs->offset);
});
absl::flat_hash_map<const AllocatedSlice*, int64_t> map;
for (int slice_time = 0; slice_time < slice_time_to_slice.size();
++slice_time) {
map[slice_time_to_slice[slice_time]] = slice_time;
}
return map;
}
void SetUpPermutationForCurrentType() {
CHECK(permutation_type_ != PermutationType::kUninitialized);
if (Done()) {
return;
}
int permutation_index = NextAvailablePermutationIndex(-1);
for (int i = slice_times_available_for_permutation_.size() - 1; i >= 0;
--i) {
if (permutation_type_ == PermutationType::kSmallerOffsetLargerSliceTime ||
(permutation_type_ ==
PermutationType::kDistributeSmallSliceTimesAroundMiddleOffset &&
IsOdd(i))) {
CHECK_LT(permutation_index, permutation_.size());
permutation_[permutation_index] =
slice_times_available_for_permutation_[i];
permutation_index = NextAvailablePermutationIndex(permutation_index);
}
}
for (int i = 0; i < slice_times_available_for_permutation_.size(); ++i) {
if (permutation_type_ ==
PermutationType::kSmallerOffsetSmallerSliceTime ||
(permutation_type_ ==
PermutationType::kDistributeSmallSliceTimesAroundMiddleOffset &&
IsEven(i))) {
CHECK_LT(permutation_index, permutation_.size());
permutation_[permutation_index] =
slice_times_available_for_permutation_[i];
permutation_index = NextAvailablePermutationIndex(permutation_index);
}
}
CHECK_EQ(permutation_index, permutation_.size());
}
int NextAvailablePermutationIndex(int permutation_index) {
do {
++permutation_index;
} while (permutation_index < permutation_.size() &&
fixed_permutation_values_[permutation_index]);
return permutation_index;
}
int64_t num_slices_;
std::vector<bool> fixed_permutation_values_;
std::vector<int64_t> slice_times_available_for_permutation_;
PermutationType permutation_type_ = PermutationType::kUninitialized;
std::vector<int64_t> permutation_;
};
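// Wraps a base iterator and skips permutations that fail validation or that
// map to an inclusive-start-time sequence already produced earlier in the
// iteration.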
class ComposedSliceTimePermutationIterator
: public SliceTimePermutationIterator {
public:
ComposedSliceTimePermutationIterator(
SliceTimePermutationValidator validator,
ObservedPermutationManager seen_manager,
std::unique_ptr<SliceTimePermutationIterator> base_iterator)
: validator_(std::move(validator)),
seen_(std::move(seen_manager)),
base_iterator_(std::move(base_iterator)) {}
~ComposedSliceTimePermutationIterator() override = default;
void Begin() override { NextImpl(true); }
bool Done() const override { return base_iterator_->Done(); }
void Next() override { NextImpl(false); }
absl::Span<const int64_t> Get() const override {
return base_iterator_->Get();
}
private:
void NextImpl(bool initialize) {
if (initialize) {
seen_.Clear();
base_iterator_->Begin();
}
if (Done()) {
return;
}
if (!initialize) {
base_iterator_->Next();
}
while (!Done() && (!validator_.IsValid(Get()) || !seen_.Insert(Get()))) {
base_iterator_->Next();
}
}
SliceTimePermutationValidator validator_;
ObservedPermutationManager seen_;
std::unique_ptr<SliceTimePermutationIterator> base_iterator_;
};
}
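// For brand-new allocations there is no original slicing to validate
// against, so only duplicate start-time permutations are filtered out.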
std::unique_ptr<SliceTimePermutationIterator>
SliceTimePermutationIterator::CreateForNewAllocation(
Ty ty, absl::Span<const int64_t> inclusive_slice_start_times) {
switch (ty) {
case Ty::kAll:
return std::make_unique<ComposedSliceTimePermutationIterator>(
SliceTimePermutationValidator(nullptr),
ObservedPermutationManager(inclusive_slice_start_times),
std::make_unique<SliceTimeAllPermutationIterator>(
inclusive_slice_start_times.size()));
case Ty::kPreferred:
return std::make_unique<ComposedSliceTimePermutationIterator>(
SliceTimePermutationValidator(nullptr),
ObservedPermutationManager(inclusive_slice_start_times),
std::make_unique<SliceTimePreferredPermutationIterator>(
inclusive_slice_start_times.size(),
nullptr));
}
}
std::unique_ptr<SliceTimePermutationIterator>
SliceTimePermutationIterator::CreateForRepack(
Ty ty, const SlicedAllocationData* original_sliced_allocation) {
int64_t num_slices = 1;
if (original_sliced_allocation) {
num_slices = original_sliced_allocation->num_slices();
}
std::vector<int64_t> inclusive_start_times;
if (original_sliced_allocation) {
inclusive_start_times =
original_sliced_allocation->SortedInclusiveStartTimes();
} else {
inclusive_start_times.push_back(0);
}
switch (ty) {
case Ty::kAll:
return std::make_unique<ComposedSliceTimePermutationIterator>(
SliceTimePermutationValidator(original_sliced_allocation),
ObservedPermutationManager(inclusive_start_times),
std::make_unique<SliceTimeAllPermutationIterator>(num_slices));
case Ty::kPreferred:
return std::make_unique<ComposedSliceTimePermutationIterator>(
SliceTimePermutationValidator(original_sliced_allocation),
ObservedPermutationManager(inclusive_start_times),
std::make_unique<SliceTimePreferredPermutationIterator>(
num_slices, original_sliced_allocation));
}
}
template <typename BufferType>
std::string GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedAllocationFinder::FreeChunkPiece::ToString() const {
return absl::StrCat("{ dimensions: ", dimensions.ToString(), ", free at: t",
earliest_free_slice_time, " }");
}
template <typename BufferType>
std::string GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedAllocationFinder::FreeChunkRoot::ToString() const {
return absl::StrCat(
"{ chunk: ", chunk.ToString(), ", pieces: { ",
absl::StrJoin(
pieces.rbegin(), pieces.rend(), ", ",
[](std::string* out, const auto& offset_sliced_free_chunk_pair) {
absl::StrAppend(out,
offset_sliced_free_chunk_pair.second.ToString());
}),
" } }");
}
template <typename BufferType>
GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedAllocationFinder::
FreeChunkRoot::FreeChunkRoot(const Chunk& free_chunk,
int64_t free_chunk_slice_time)
: chunk(free_chunk),
pieces({{free_chunk.offset, {free_chunk_slice_time, free_chunk}}}) {}
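// Folds a chunk that is free at `free_chunk_slice_time` into this root's
// pieces: any overlapping piece that becomes free exactly one slice time
// later is split, with the overlap re-recorded at the earlier slice time and
// the non-overlapping remainders keeping their previous earliest-free time.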
template <typename BufferType>
void GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedAllocationFinder::
FreeChunkRoot::Update(const Chunk& free_chunk,
int64_t free_chunk_slice_time) {
VLOG(4) << "Updating root " << chunk.ToString() << " with "
<< free_chunk.ToString() << ", free at t" << free_chunk_slice_time;
std::vector<FreeChunkPiece> new_pieces;
for (auto it = pieces.lower_bound(free_chunk.chunk_end() - 1);
it != pieces.end() &&
it->second.dimensions.chunk_end() >= free_chunk.offset;) {
const FreeChunkPiece& piece = it->second;
if (!free_chunk.OverlapsWith(piece.dimensions) ||
free_chunk_slice_time != piece.earliest_free_slice_time - 1) {
++it;
continue;
}
if (free_chunk.offset > piece.dimensions.offset) {
FreeChunkPiece new_piece0(
{piece.earliest_free_slice_time,
Chunk::FromOffsetEnd(
piece.dimensions.offset,
std::min(free_chunk.offset, piece.dimensions.chunk_end()))});
new_pieces.push_back(new_piece0);
}
FreeChunkPiece new_piece1(
{free_chunk_slice_time,
Chunk::FromOffsetEnd(
std::max(free_chunk.offset, piece.dimensions.offset),
std::min(free_chunk.chunk_end(), piece.dimensions.chunk_end()))});
new_pieces.push_back(new_piece1);
if (free_chunk.chunk_end() < piece.dimensions.chunk_end()) {
FreeChunkPiece new_piece2(
{piece.earliest_free_slice_time,
Chunk::FromOffsetEnd(free_chunk.chunk_end(),
piece.dimensions.chunk_end())});
new_pieces.push_back(new_piece2);
}
it = pieces.erase(it);
}
for (auto it = new_pieces.begin(); it != new_pieces.end(); ++it) {
pieces.insert({it->dimensions.offset, *it});
}
VLOG(4) << "Root after update: " << ToString();
}
namespace {
constexpr int64_t kMaxRenderOffset = 200;
constexpr int64_t kMaxRenderSliceTime = 9;
std::string RenderTimeByFreeChunks(
const std::vector<std::vector<Chunk>>& time_by_chunks) {
if (time_by_chunks.size() - 1 > kMaxRenderSliceTime) {
return "too many time slices to render";
}
std::vector<std::string> time_by_memory_units;
for (int i = 0; i < time_by_chunks.size(); ++i) {
time_by_memory_units.push_back(std::string(kMaxRenderOffset + 1, 'X'));
for (const Chunk& chunk : time_by_chunks[i]) {
if (chunk.chunk_end() > kMaxRenderOffset) {
return "largest offset is too large to render";
}
for (int j = chunk.offset; j < chunk.chunk_end(); ++j) {
time_by_memory_units[i][j] = ' ';
}
}
}
std::vector<std::string> lines;
lines.push_back(" ^");
for (int i = time_by_memory_units.size() - 1; i >= 0; --i) {
lines.push_back(absl::StrCat("t", i, " |", time_by_memory_units[i]));
}
std::string yaxis = " +";
for (int i = 0; i < kMaxRenderOffset + 1; ++i) {
if (i % 10 == 0) {
yaxis += "!";
continue;
}
if (i % 5 == 0) {
yaxis += "|";
continue;
}
yaxis += "-";
}
lines.push_back(absl::StrCat(yaxis, ">"));
lines.push_back(" space");
return absl::StrJoin(lines, "\n");
}
}
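// Builds the free-chunk roots from the free space at the latest slice time,
// then sweeps earlier slice times in reverse, splitting each root wherever
// part of it is already free earlier. Afterwards every root records, per
// offset range, the earliest slice time at which that range becomes free.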
template <typename BufferType>
GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedAllocationFinder::
SlicedAllocationFinder(
absl::Span<const FreeChunks> free_chunks_per_slice_time,
std::vector<int64_t> sorted_slice_sizes, int64_t max_colocation_size,
int64_t preferred_offset, int64_t alignment,
std::unique_ptr<SliceTimePermutationIterator>
slice_time_permutation_iterator,
absl::AnyInvocable<bool(int64_t) const> is_offset_allowed)
: sorted_slice_sizes_(std::move(sorted_slice_sizes)),
slice_size_sum_(std::accumulate(sorted_slice_sizes_.begin(),
sorted_slice_sizes_.end(),
static_cast<int64_t>(0))),
max_colocation_size_(max_colocation_size),
preferred_offset_(preferred_offset),
alignment_(alignment),
slice_time_permutation_iterator_(
std::move(slice_time_permutation_iterator)),
is_offset_allowed_(std::move(is_offset_allowed)) {
CHECK_EQ(sorted_slice_sizes_.size(), free_chunks_per_slice_time.size())
<< "We expect a data structure explaining the free chunks at each slice "
"time.";
CHECK(!free_chunks_per_slice_time.empty())
<< "Even an unsliced allocation is expected to have a list of free "
"chunks at slice time t0.";
if (VLOG_IS_ON(1)) {
std::vector<std::vector<Chunk>> time_by_chunks;
for (int64_t i = 0; i < free_chunks_per_slice_time.size(); ++i) {
std::vector<Chunk> chunks;
for (const auto& free_chunk : free_chunks_per_slice_time[i]) {
chunks.push_back(
Chunk::FromOffsetEnd(free_chunk.first, free_chunk.second));
}
time_by_chunks.push_back(chunks);
}
LOG(INFO) << "Initial free space:\n"
<< RenderTimeByFreeChunks(time_by_chunks);
}
if (max_colocation_size_ < slice_size_sum_) {
max_colocation_size_ = slice_size_sum_;
}
for (const std::pair<const int64_t, int64_t>& free_chunk_pair :
free_chunks_per_slice_time.back()) {
Chunk free_chunk =
Chunk::FromOffsetEnd(free_chunk_pair.first, free_chunk_pair.second);
if (free_chunk.size == 0) {
continue;
}
CHECK_GT(free_chunk.size, 0);
free_chunks_.insert(
{free_chunk_pair.first, FreeChunkRoot(free_chunk, LatestSliceTime())});
}
for (int64_t free_chunk_slice_time = LatestSliceTime() - 1;
free_chunk_slice_time >= EarliestSliceTime(); --free_chunk_slice_time) {
auto it = free_chunks_.begin();
for (const std::pair<const int64_t, int64_t>& free_chunk_pair :
free_chunks_per_slice_time[free_chunk_slice_time]) {
Chunk free_chunk =
Chunk::FromOffsetEnd(free_chunk_pair.first, free_chunk_pair.second);
if (free_chunk.size == 0) {
continue;
}
CHECK_GT(free_chunk.size, 0);
for (; it != free_chunks_.end() &&
free_chunk.chunk_end() - 1 < it->second.chunk.offset;
++it) {
}
if (it == free_chunks_.end()) {
break;
}
auto previous_it = it;
for (; it != free_chunks_.end() &&
it->second.chunk.OverlapsWith(free_chunk);
previous_it = it, ++it) {
FreeChunkRoot& root = it->second;
root.Update(free_chunk, free_chunk_slice_time);
}
it = previous_it;
}
}
VLOG(2) << "Initial candidates:\n" << FreeChunksToAsciiArt();
VLOG(2) << "SlicedAllocationFinder:\n" << ToString();
}
template <typename BufferType>
std::string GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedAllocationFinder::FreeChunksToAsciiArt() const {
auto it = free_chunks_.begin();
if (it == free_chunks_.end()) {
return "no candidate data";
}
int64_t final_offset = it->second.chunk.chunk_end();
if (LatestSliceTime() > kMaxRenderSliceTime ||
final_offset > kMaxRenderOffset) {
return "candidates too large to render";
}
std::vector<std::vector<Chunk>> time_by_chunks;
for (int64_t i = EarliestSliceTime(); i <= LatestSliceTime(); ++i) {
time_by_chunks.push_back({});
}
for (const std::pair<const int64_t, FreeChunkRoot>& offset_root_pair :
free_chunks_) {
for (const std::pair<const int64_t, FreeChunkPiece>& offset_piece_pair :
offset_root_pair.second.pieces) {
for (int64_t slice_time =
offset_piece_pair.second.earliest_free_slice_time;
slice_time <= LatestSliceTime(); ++slice_time) {
time_by_chunks[slice_time].push_back(
offset_piece_pair.second.dimensions);
}
}
}
return RenderTimeByFreeChunks(time_by_chunks);
}
template <typename BufferType>
std::string GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedAllocationFinder::ToString() const {
std::vector<std::string> lines;
lines.push_back(absl::StrCat("slices: { ",
absl::StrJoin(sorted_slice_sizes_, ", "), " }"));
lines.push_back(absl::StrCat("max_colocation_size: ", max_colocation_size_));
lines.push_back(absl::StrCat("preferred_offset: ", preferred_offset_));
lines.push_back("free chunks:");
int i = 0;
for (auto it = free_chunks_.rbegin(); it != free_chunks_.rend(); ++it) {
lines.push_back(absl::StrCat(" chunk ", i, ": ", it->second.ToString()));
++i;
}
return absl::StrJoin(lines, "\n");
}
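// Tries the preferred offset first (if any), then visits candidate roots in
// order of increasing size (ties broken by smaller offset) via a heap,
// returning the first root in which some slice-time permutation fits. An
// empty result indicates a bug, since the heap being packed is unbounded.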
template <typename BufferType>
typename GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedAllocationFinder::ChunksSortedBySliceTime
GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedAllocationFinder::Find()
const {
if (preferred_offset_ >= 0) {
ChunksSortedBySliceTime chunks = FindForOffset(preferred_offset_);
if (!chunks.empty()) {
VLOG(1) << "SlicedAllocationFinder found chunks: " << "{ "
<< absl::StrJoin(chunks, ", ", absl::StreamFormatter()) << " }";
return chunks;
}
}
std::vector<const FreeChunkRoot*> root_heap;
for (auto it = free_chunks_.rbegin(); it != free_chunks_.rend(); ++it) {
root_heap.push_back(&it->second);
}
auto heap_cmp = [](const FreeChunkRoot* lhs, const FreeChunkRoot* rhs) {
if (lhs->chunk.size != rhs->chunk.size) {
return lhs->chunk.size > rhs->chunk.size;
}
return lhs->chunk.offset > rhs->chunk.offset;
};
auto heap_next = [&]() -> const FreeChunkRoot* {
if (root_heap.empty()) {
return nullptr;
}
absl::c_pop_heap(root_heap, heap_cmp);
const FreeChunkRoot* root = root_heap.back();
root_heap.pop_back();
return root;
};
absl::c_make_heap(root_heap, heap_cmp);
for (const FreeChunkRoot* root = heap_next(); root != nullptr;
root = heap_next()) {
VLOG(3) << "SlicedAllocationFinder::Find() searching " << root->ToString();
ChunksSortedBySliceTime chunks = FindInRoot(*root);
if (!chunks.empty()) {
VLOG(1) << "SlicedAllocationFinder found chunks: " << "{ "
<< absl::StrJoin(chunks, ", ", absl::StreamFormatter()) << " }";
return chunks;
}
}
LOG(ERROR) << "We did not find a place for our sliced allocation. This "
"should not happen because MSA operates on an infinitely "
"sized heap.";
return {};
}
template <typename BufferType>
typename GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedAllocationFinder::ChunksSortedBySliceTime
GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedAllocationFinder::FindForOffset(int64_t offset) const {
VLOG(3) << "SlicedAllocationFinder::FindForOffset() searching offset "
<< offset;
auto it = free_chunks_.lower_bound(offset);
if (it != free_chunks_.end()) {
const FreeChunkRoot* root = &it->second;
ChunksSortedBySliceTime chunks = FindInRoot(*root, offset);
if (!chunks.empty()) {
VLOG(3) << "SlicedAllocationFinder found chunks at " << offset << ": "
<< "{ " << absl::StrJoin(chunks, ", ", absl::StreamFormatter())
<< " }";
return chunks;
}
}
return {};
}
template <typename BufferType>
absl::Status GlobalDecreasingSizeBestFitHeap<BufferType>::
SlicedAllocationFinder::DoesPermutationFit(
absl::Span<const int64_t> permutation_of_slice_times,
const FreeChunkRoot& root, int64_t offset) const {
absl::Status result =
DoesPermutationFitImpl(permutation_of_slice_times, root, offset);
VLOG(3) << "SlicedAllocationFinder::DoesPermutationFit\n"
<< " permutation of slice times: [ "
<< absl::StrJoin(permutation_of_slice_times, ",") << " ]\n"
<< " offset: " << offset << "\n"
<< " root: " << root.ToString() << "\n"
<< " -> " << result;
return result;
}
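// Validates the basic constraints (permutation length, offset within the
// root, room for the full colocation size, offset allowed), then walks the
// root's pieces upward from `offset`, consuming slices in the permuted
// slice-time order; a slice only fits in a piece whose earliest free time is
// no later than that slice's time.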
template <typename BufferType>
absl::Status GlobalDecreasingSizeBestFitHeap<BufferType>::
SlicedAllocationFinder::DoesPermutationFitImpl(
absl::Span<const int64_t> permutation_of_slice_times,
const FreeChunkRoot& root, int64_t offset) const {
if (permutation_of_slice_times.size() != sorted_slice_sizes_.size()) {
return InvalidArgumentStrCat(
        sorted_slice_sizes_.size(), " slice times expected in permutation. ",
permutation_of_slice_times.size(), " specified.");
}
if (offset >= root.chunk.chunk_end()) {
return FailedPrecondition(
"%s", absl::StrCat("Free chunk root ", root.chunk.ToString(),
" does not overlap with offset ", offset, "."));
}
if (offset + max_colocation_size_ > root.chunk.chunk_end()) {
return FailedPrecondition(
"%s", absl::StrCat("Not enough space to fit enitre allocation [",
offset, ", ", offset + max_colocation_size_,
") in free chunk root ", root.chunk.ToString()));
}
if (!is_offset_allowed_(offset)) {
return FailedPrecondition(
"%s", absl::StrCat("We are not permitted to place an allocation at ",
"offset ", offset, "."));
}
auto piece_fwd_it = root.pieces.lower_bound(offset);
if (piece_fwd_it == root.pieces.end()) {
return FailedPrecondition(
"%s", absl::StrCat("Offset ", offset, " comes before free chunk root ",
root.chunk.ToString()));
}
++piece_fwd_it;
auto piece_reverse_it = std::make_reverse_iterator(piece_fwd_it);
auto at_pieces_end = [&](auto it) { return it == root.pieces.rend(); };
size_t slice_index = 0;
auto out_of_slices = [&](size_t index) { return index > LatestSliceTime(); };
int64_t amount_of_current_slice_consumed = 0;
int64_t current_offset = offset;
while (!at_pieces_end(piece_reverse_it) && !out_of_slices(slice_index)) {
int64_t current_slice_time = permutation_of_slice_times[slice_index];
int64_t current_slice_size = sorted_slice_sizes_[slice_index];
int64_t remaining_in_slice =
current_slice_size - amount_of_current_slice_consumed;
int64_t current_piece_time =
piece_reverse_it->second.earliest_free_slice_time;
int64_t remaining_in_piece =
piece_reverse_it->second.dimensions.chunk_end() - current_offset;
int64_t amount_to_consume =
std::min(remaining_in_slice, remaining_in_piece);
if (current_piece_time > current_slice_time) {
return FailedPrecondition(
"%s",
absl::StrCat("At slice time t", current_slice_time, ", slice ",
slice_index, " does not fit at offset ", current_offset,
" in root ", root.chunk.ToString()));
}
if (remaining_in_slice >= remaining_in_piece) {
++piece_reverse_it;
amount_of_current_slice_consumed += amount_to_consume;
}
if (remaining_in_slice <= remaining_in_piece) {
++slice_index;
amount_of_current_slice_consumed = 0;
}
current_offset += amount_to_consume;
}
if (!out_of_slices(slice_index)) {
return InternalStrCat("Ran out of space in root ", root.chunk.ToString(),
" to fit slice permutation; however, we should "
"have caught such a condition earlier.");
}
return absl::OkStatus();
}
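// Scans aligned offsets inside the root (or only the requested offset),
// testing each slice-time permutation at every offset; a root made of a
// single piece is only tried at its first aligned offset, since the rest of
// the root sees the same free-time profile.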
template <typename BufferType>
typename GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedAllocationFinder::ChunksSortedBySliceTime
GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedAllocationFinder::FindInRoot(
const FreeChunkRoot& root,
std::optional<int64_t> only_try_this_offset) const {
int64_t first_offset = root.chunk.offset;
int64_t last_end = root.chunk.chunk_end();
if (only_try_this_offset.has_value()) {
first_offset = *only_try_this_offset;
last_end = *only_try_this_offset + max_colocation_size_;
if (*only_try_this_offset % alignment_ != 0) {
return {};
}
} else if (first_offset % alignment_ != 0) {
first_offset = first_offset + (alignment_ - (first_offset % alignment_));
}
CHECK_EQ(first_offset % alignment_, 0);
for (int64_t offset = first_offset; offset + max_colocation_size_ <= last_end;
offset += alignment_) {
for (slice_time_permutation_iterator_->Begin();
!slice_time_permutation_iterator_->Done();
slice_time_permutation_iterator_->Next()) {
if (DoesPermutationFit(slice_time_permutation_iterator_->Get(), root,
offset)
.ok()) {
return PermutationToChunks(slice_time_permutation_iterator_->Get(),
offset);
}
}
if (root.pieces.size() == 1) {
break;
}
}
return {};
}
template <typename BufferType>
typename GlobalDecreasingSizeBestFitHeap<
BufferType>::SlicedAllocationFinder::ChunksSortedBySliceTime
GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedAllocationFinder::
PermutationToChunks(absl::Span<const int64_t> permutation_of_slice_times,
int64_t offset) const {
ChunksSortedBySliceTime chunks(permutation_of_slice_times.size() + 1,
Chunk::FromOffsetSize(-1, 1));
int64_t current_offset = offset;
for (int64_t slice_index = 0; slice_index <= LatestSliceTime();
++slice_index) {
int64_t size = sorted_slice_sizes_[slice_index];
chunks[permutation_of_slice_times[slice_index]] =
Chunk::FromOffsetSize(current_offset, size);
current_offset += size;
}
chunks.back() = Chunk::FromOffsetSize(
current_offset, max_colocation_size_ - (current_offset - offset));
DCHECK(std::all_of(chunks.begin(), chunks.end(), [](const Chunk& chunk) {
return chunk.offset >= 0 && chunk.size >= 0;
}));
return chunks;
}
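// Greedy top-level pass: sort the recorded intervals with the configured
// comparator and commit a best-fit chunk for every interval that still needs
// allocation.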
template <typename BufferType>
absl::StatusOr<HeapSimulator::Result<BufferType>>
GlobalDecreasingSizeBestFitHeap<BufferType>::Finish() {
std::vector<BufferInterval> sorted_buffer_intervals =
GetSortedBufferIntervals();
for (auto& buffer_interval : sorted_buffer_intervals) {
if (!buffer_interval.need_allocation) {
continue;
}
CommitChunk(buffer_interval, FindChunkCandidate(buffer_interval));
}
VLOG(1) << "result heap_size: " << result_.heap_size;
Result result;
result.heap_size = result_.heap_size;
result.heap_results.emplace_back(result_);
return result;
}
template <typename BufferType>
std::vector<
typename GlobalDecreasingSizeBestFitHeap<BufferType>::BufferInterval>
GlobalDecreasingSizeBestFitHeap<BufferType>::GetSortedBufferIntervals() const {
std::vector<BufferInterval> sorted_buffer_intervals;
sorted_buffer_intervals.reserve(buffer_intervals_.size());
for (auto& entry : buffer_intervals_) {
sorted_buffer_intervals.push_back(entry.second);
}
absl::c_sort(sorted_buffer_intervals, buffer_interval_compare_);
return sorted_buffer_intervals;
}
template <typename BufferType>
typename GlobalDecreasingSizeBestFitHeap<BufferType>::Chunk
GlobalDecreasingSizeBestFitHeap<BufferType>::FindChunkCandidate(
const GlobalDecreasingSizeBestFitHeap::BufferInterval& buffer_interval,
int64_t preferred_offset) const {
const SlicedBufferInterval sliced_buffer_interval =
SlicedBufferInterval::CreateConstInterval(buffer_interval);
std::vector<Chunk> chunks =
FindChunkCandidates(sliced_buffer_interval, preferred_offset);
CHECK_EQ(chunks.size(), 1);
return chunks[0];
}
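// Starts from a single free chunk spanning [0, INT64_MAX) and subtracts every
// chunk live during the interval or any of its transitive colocations. A gap
// below a used chunk is kept only if it can still hold this buffer, and the
// space above a used chunk is re-added (aligned) only if it can hold
// `max_colocation_size`.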
template <typename BufferType>
typename GlobalDecreasingSizeBestFitHeap<BufferType>::FreeChunks
GlobalDecreasingSizeBestFitHeap<BufferType>::MakeFreeChunks(
const BufferInterval& buffer_interval, int64_t max_colocation_size) const {
FreeChunks free_chunks{
{0, INT64_MAX}};
auto subtract_used_chunks = [&](const std::vector<Chunk>& used_chunks) {
for (const Chunk& used_chunk : used_chunks) {
auto it_end = free_chunks.lower_bound(used_chunk.chunk_end());
if (it_end == free_chunks.end()) continue;
auto it_start = free_chunks.lower_bound(used_chunk.offset);
int64_t free_chunk_end = it_end->second;
if (it_start != free_chunks.end()) {
if (used_chunk.offset - it_start->first >= buffer_interval.size) {
it_start->second = std::min(it_start->second, used_chunk.offset);
} else {
++it_start;
}
}
free_chunks.erase(it_end, it_start);
int64_t chunk_end_aligned = RoundUpTo(used_chunk.chunk_end(), alignment_);
if (free_chunk_end - chunk_end_aligned >= max_colocation_size) {
CHECK(free_chunks.insert({chunk_end_aligned, free_chunk_end}).second);
}
}
};
subtract_used_chunks(interval_tree_.ChunksOverlappingInTime(
buffer_interval.start, buffer_interval.end));
for (const BufferType* colocation :
GetTransitiveColocations(buffer_interval)) {
const BufferInterval& interval = buffer_intervals_.at(colocation);
VLOG(1) << " Alias size " << interval.size << ", start " << interval.start
<< ", end " << interval.end << " " << interval.buffer->ToString();
subtract_used_chunks(
interval_tree_.ChunksOverlappingInTime(interval.start, interval.end));
}
return free_chunks;
}
template <typename BufferType>
std::vector<typename GlobalDecreasingSizeBestFitHeap<BufferType>::Chunk>
GlobalDecreasingSizeBestFitHeap<BufferType>::FindChunkCandidates(
const SlicedBufferInterval& sliced_buffer_interval,
int64_t preferred_offset) const {
VLOG(1) << "Finding chunks for sliced buffer interval: "
<< sliced_buffer_interval.ToString();
int64_t max_colocation_size =
GetMaxColocationSize(sliced_buffer_interval.full_buffer_interval());
auto chunks =
CreateSlicedAllocationFinder(
sliced_buffer_interval, max_colocation_size, preferred_offset,
SliceTimePermutationIterator::CreateForNewAllocation(
slice_time_permutation_iteration_type_,
sliced_buffer_interval.inclusive_start_times()))
.Find();
return PostProcessFindChunkCandidatesResult(sliced_buffer_interval,
std::move(chunks));
}
template <typename BufferType>
int64_t GlobalDecreasingSizeBestFitHeap<BufferType>::GetMaxColocationSize(
const BufferInterval& buffer_interval) const {
int64_t max_colocation_size = buffer_interval.size;
for (const BufferType* colocation :
GetTransitiveColocations(buffer_interval)) {
max_colocation_size =
std::max(max_colocation_size, buffer_intervals_.at(colocation).size);
}
return max_colocation_size;
}
template <typename BufferType>
typename GlobalDecreasingSizeBestFitHeap<BufferType>::SlicedAllocationFinder
GlobalDecreasingSizeBestFitHeap<BufferType>::CreateSlicedAllocationFinder(
const SlicedBufferInterval& sliced_interval, int64_t max_colocation_size,
int64_t preferred_offset,
std::unique_ptr<SliceTimePermutationIterator>
slice_time_permutation_iterator,
absl::AnyInvocable<bool(int64_t) const> is_offset_allowed) const {
std::vector<FreeChunks> free_chunks_per_slice_time;
free_chunks_per_slice_time.reserve(sliced_interval.num_slices());
for (int slice_time = 0; slice_time < sliced_interval.num_slices() - 1;
++slice_time) {
free_chunks_per_slice_time.push_back(
MakeFreeChunks(sliced_interval.IntervalForMakeFreeChunks(slice_time),
-1));
}
free_chunks_per_slice_time.push_back(MakeFreeChunks(
sliced_interval.IntervalForMakeFreeChunks(sliced_interval.num_slices() -
1),
max_colocation_size));
return SlicedAllocationFinder(
free_chunks_per_slice_time, sliced_interval.SliceSizesSortedByOffset(),
max_colocation_size, preferred_offset, alignment_,
std::move(slice_time_permutation_iterator), std::move(is_offset_allowed));
}
template <typename BufferType>
std::vector<typename GlobalDecreasingSizeBestFitHeap<BufferType>::Chunk>
GlobalDecreasingSizeBestFitHeap<BufferType>::
PostProcessFindChunkCandidatesResult(
const SlicedBufferInterval& sliced_interval,
std::vector<Chunk> chunks) const {
if (chunks.empty()) {
return {};
}
CHECK_EQ(chunks.size(), sliced_interval.num_slices() + 1);
chunks.pop_back();
return chunks;
}
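// Records the chosen chunk in the interval tree and chunk map, grows the
// running heap size, and places every transitive colocation at the same
// offset with a chunk sized to that colocation's own interval.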
template <typename BufferType>
void GlobalDecreasingSizeBestFitHeap<BufferType>::CommitChunk(
const GlobalDecreasingSizeBestFitHeap<BufferType>::BufferInterval&
buffer_interval,
GlobalDecreasingSizeBestFitHeap<BufferType>::Chunk chunk) {
CHECK_EQ(chunk.size, buffer_interval.size);
result_.heap_size = result_.UpdatedHeapSize(chunk);
interval_tree_.Add(buffer_interval.start, buffer_interval.end, chunk);
for (auto colocation : GetTransitiveColocations(buffer_interval)) {
auto colocation_interval = buffer_intervals_[colocation];
Chunk colocation_chunk =
Chunk::FromOffsetSize(chunk.offset, colocation_interval.size);
result_.heap_size = result_.UpdatedHeapSize(colocation_chunk);
interval_tree_.Add(colocation_interval.start, colocation_interval.end,
colocation_chunk);
AddToChunkMap(colocation, colocation_chunk);
}
AddToChunkMap(buffer_interval.buffer, chunk);
}
template <typename BufferType>
void GlobalDecreasingSizeBestFitHeap<BufferType>::AddToChunkMap(
const BufferType* buffer, Chunk chunk) {
const auto emplace_result = result_.chunk_map.emplace(buffer, chunk);
DCHECK(emplace_result.second);
}
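// Packs buffers into a sequence of heaps: each sweep commits the remaining
// intervals whose candidate chunk fits under the per-heap size limit (or
// whenever the current heap is still empty), then the heap is closed and a
// fresh one is started until every interval has been placed.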
absl::StatusOr<HeapSimulator::Result<HloValue>>
ConstrainedGlobalDecreasingSizeBestFitHeap::Finish() {
std::vector<BufferInterval> sorted_buffer_vec = GetSortedBufferIntervals();
std::list<BufferInterval> sorted_buffer_intervals(sorted_buffer_vec.begin(),
sorted_buffer_vec.end());
Result multi_heap_result;
do {
for (auto it = sorted_buffer_intervals.begin();
it != sorted_buffer_intervals.end();) {
BufferInterval buffer_interval = *it;
if (!buffer_interval.need_allocation) {
it = sorted_buffer_intervals.erase(it);
continue;
}
if (buffer_interval.size > size_limit_per_heap_) {
LOG(WARNING) << "Alloc buffer size " << buffer_interval.size
<< " larger than the per-heap size limit "
<< size_limit_per_heap_;
}
Chunk chunk_candidate = FindChunkCandidate(buffer_interval);
if (chunk_candidate.chunk_end() <= size_limit_per_heap_ ||
result_.heap_size == 0) {
CommitChunk(buffer_interval, chunk_candidate);
it = sorted_buffer_intervals.erase(it);
continue;
}
++it;
}
multi_heap_result.heap_size += result_.heap_size;
multi_heap_result.heap_results.push_back(std::move(result_));
result_ = {};
interval_tree_ = {};
} while (!sorted_buffer_intervals.empty());
VLOG(1) << "Number of heaps produced = "
<< multi_heap_result.heap_results.size();
return multi_heap_result;
}
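// Runs every candidate algorithm to completion and returns the result with
// the smallest heap size.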
template <typename BufferType>
absl::StatusOr<HeapSimulator::Result<BufferType>>
ChooseBestHeapAlgorithm<BufferType>::Finish() {
DCHECK(!algorithms_.empty());
std::vector<Result> results(algorithms_.size());
int64_t min_size = INT64_MAX;
int min_size_index = -1;
for (int i = 0; i < algorithms_.size(); ++i) {
TF_ASSIGN_OR_RETURN(results[i], algorithms_[i]->Finish());
if (results[i].heap_size < min_size) {
min_size = results[i].heap_size;
min_size_index = i;
}
}
DCHECK_GE(min_size_index, 0);
return results[min_size_index];
}
template class GlobalDecreasingSizeBestFitHeap<HloValue>;
template class GlobalDecreasingSizeBestFitHeap<AllocationBlock>;
template class ChooseBestHeapAlgorithm<HloValue>;
} | #include "xla/service/heap_simulator/heap_simulator.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/literal_util.h"
#include "xla/service/buffer_value.h"
#include "xla/service/heap_simulator/allocation_block.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_value.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using ::testing::ContainerEq;
using ::testing::HasSubstr;
using ::testing::StrEq;
class MinimumMemoryForSequenceTest : public HloTestBase {};
TEST_F(MinimumMemoryForSequenceTest, MultiComputation) {
auto module = CreateNewVerifiedModule();
const Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape, scalar_shape});
auto cond_builder = HloComputation::Builder("WhileCond");
HloInstruction* cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "cond_param"));
HloInstruction* cond_iter = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, cond_param, 0));
HloInstruction* cond_data = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, cond_param, 1));
HloInstruction* cond_lt = cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), cond_iter,
cond_data, ComparisonDirection::kLt));
HloComputation* cond_computation =
module->AddEmbeddedComputation(cond_builder.Build());
auto body_builder = HloComputation::Builder("WhileBody");
HloInstruction* body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "body_param"));
HloComputation* body_computation =
module->AddEmbeddedComputation(body_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* iter = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param_iter"));
HloInstruction* data = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "param_data"));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({iter, data}));
HloInstruction* while_op = builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, cond_computation, body_computation, tuple));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
auto size_fn = [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), 8);
};
HloSchedule schedule(module.get());
schedule.set_sequence(cond_computation,
{cond_param, cond_iter, cond_data, cond_lt});
schedule.set_sequence(body_computation, {body_param});
schedule.set_sequence(entry_computation, {iter, data, tuple, while_op});
TF_ASSERT_OK(schedule.Verify());
EXPECT_EQ(25,
HeapSimulator::MinimumMemoryForModule(schedule, size_fn).value());
}
TEST_F(MinimumMemoryForSequenceTest, SubcomputationAccounting) {
auto module = CreateNewVerifiedModule();
const Shape r0f32 = ShapeUtil::MakeShape(F32, {});
const Shape r1f32 = ShapeUtil::MakeShape(F32, {4});
const Shape r2f32 = ShapeUtil::MakeShape(F32, {2, 4});
auto cond_builder = HloComputation::Builder("WhileCond");
HloInstruction* cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, r1f32, "cond_param"));
HloInstruction* slice =
cond_builder.AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(F32, {1}), cond_param, {0}, {1}, {1}));
HloInstruction* reshape =
cond_builder.AddInstruction(HloInstruction::CreateReshape(r0f32, slice));
HloInstruction* zero = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0)));
HloInstruction* cond_comparison = cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), reshape,
zero, ComparisonDirection::kNe));
auto cond_computation = module->AddEmbeddedComputation(cond_builder.Build());
auto body_builder = HloComputation::Builder("WhileBody");
HloInstruction* body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, r1f32, "body_param"));
HloInstruction* one_vector =
body_builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1, 1, 1, 1})));
HloInstruction* subtract =
body_builder.AddInstruction(HloInstruction::CreateBinary(
r1f32, HloOpcode::kSubtract, body_param, one_vector));
auto body_computation = module->AddEmbeddedComputation(body_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* while_init =
builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1, 1, 1, 1})));
HloInstruction* while_loop =
builder.AddInstruction(HloInstruction::CreateWhile(
r1f32, cond_computation, body_computation, while_init));
HloInstruction* bcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(r2f32, while_loop, {1}));
HloInstruction* matrix = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2<float>(
{{1.0, 2.0, 3.0, 4.0}, {1.0, 2.0, 3.0, 4.0}})));
HloInstruction* transpose = builder.AddInstruction(
HloInstruction::CreateTranspose(r2f32, matrix, {0, 1}));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kAdd, transpose, bcast));
auto entry_computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
std::vector<HloInstruction*> cond_vec = {cond_param, slice, reshape, zero,
cond_comparison};
std::vector<HloInstruction*> while_body_vec = {body_param, one_vector,
subtract};
std::vector<HloInstruction*> entry_comp_vec = {while_init, while_loop, bcast,
matrix, transpose, add};
schedule.set_sequence(cond_computation, cond_vec);
schedule.set_sequence(body_computation, while_body_vec);
schedule.set_sequence(entry_computation, entry_comp_vec);
auto size_fn = [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
};
std::unique_ptr<HloAliasAnalysis> alias_analysis =
HloAliasAnalysis::Run(module.get()).value();
EXPECT_EQ(64, HeapSimulator::MinimumMemoryForComputation(
*entry_computation, schedule.sequence(entry_computation),
*alias_analysis, size_fn)
.value());
}
const char kAlloc[] = "Alloc";
const char kFree[] = "Free";
const char kShare[] = "Share";
const char kFinish[] = "Finish";
using CallSequence = std::vector<std::pair<std::string, const HloValue*>>;
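// Fake heap algorithm that records every Alloc/Free/Share/Finish call in
// order and hands out offsets derived from the number of buffers seen so
// far, so tests can assert on the exact call sequence the simulator produces.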
class HeapCallRecorder : public HeapAlgorithm<HloValue> {
public:
explicit HeapCallRecorder(CallSequence* calls) : calls_(calls) {}
~HeapCallRecorder() override {}
void Alloc(const HloValue* buffer, int64_t size) override {
calls_->emplace_back(kAlloc, buffer);
const int64_t offset = result_.chunk_map.size();
result_.chunk_map.emplace(buffer, Chunk::FromOffsetSize(offset, size));
}
void ShareWith(const HloValue* buffer, const HloValue* shared,
int64_t size) override {
calls_->emplace_back(kShare, buffer);
const int64_t offset = result_.chunk_map[shared].offset;
result_.chunk_map.emplace(buffer, Chunk::FromOffsetSize(offset, size));
}
void Free(const HloValue* buffer, int64_t size) override {
calls_->emplace_back(kFree, buffer);
}
absl::StatusOr<Result> Finish() override {
calls_->emplace_back(kFinish, nullptr);
HeapSimulator::Result<HloValue> result;
result.heap_size = result_.heap_size;
result.heap_results.emplace_back(std::move(result_));
return result;
}
private:
CallSequence* calls_;
HeapSimulator::HeapResult<HloValue> result_;
};
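// Test helper that runs the heap simulator over a computation or whole
// module with a HeapCallRecorder, exposing the buffers, their assigned
// offsets, and the recorded call sequence for assertions.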
class HeapSimulatorTracker {
public:
explicit HeapSimulatorTracker(
std::unique_ptr<HloModule> module,
const std::vector<HloInstruction*>& instruction_sequence,
const std::vector<HloInstruction*>& must_alias_set = {},
const HloDataflowAnalysis::CanShareBuffer& can_share_buffer = nullptr) {
module_ = std::move(module);
Init(instruction_sequence, can_share_buffer);
}
explicit HeapSimulatorTracker(
const std::string& name,
std::unique_ptr<HloComputation> entry_computation,
const std::vector<HloInstruction*>& instruction_sequence,
const std::vector<HloInstruction*>& must_alias_set = {},
const HloDataflowAnalysis::CanShareBuffer& can_share_buffer = nullptr) {
HloModuleConfig config;
module_ = std::make_unique<HloModule>(name, config);
module_->AddEntryComputation(std::move(entry_computation));
Init(instruction_sequence, can_share_buffer);
}
explicit HeapSimulatorTracker(const std::string& name) {
HloModuleConfig config;
module_ = std::make_unique<HloModule>(name, config);
}
void RunWholeModule(
const std::vector<HloInstruction*>& full_module_sequence) {
alias_analysis_ = HloAliasAnalysis::Run(module_.get()).value();
HloSchedule schedule(module_.get());
absl::flat_hash_map<const HloInstruction*, int> reverse_position;
for (int i = 0; i < full_module_sequence.size(); ++i) {
HloInstruction* instruction = full_module_sequence[i];
schedule.GetOrCreateSequence(instruction->parent())
.push_back(instruction);
reverse_position[instruction] = full_module_sequence.size() - i;
}
auto size_fn = [&reverse_position](const BufferValue& buffer) {
return reverse_position[buffer.instruction()];
};
auto algorithm = std::make_unique<HeapCallRecorder>(&actual_calls_);
result_ = HeapSimulator::Run(std::move(algorithm), *module_, schedule,
*alias_analysis_, size_fn)
.value();
}
HloModule* module() { return module_.get(); }
const HloValue* BufferAt(const HloInstruction* instruction,
const ShapeIndex& index) const {
return &alias_analysis_->dataflow_analysis().GetUniqueValueAt(instruction,
index);
}
int64_t OffsetAt(const HloInstruction* instruction, const ShapeIndex& index) {
const HloValue* buffer = BufferAt(instruction, index);
CHECK_EQ(1, result_.heap_results.size());
return result_.heap_results.at(0).chunk_map.at(buffer).offset;
}
void ExpectCallSequence(const CallSequence& expected) const {
auto to_string = [](const CallSequence& sequence) {
std::string output;
for (int64_t i = 0; i < sequence.size(); ++i) {
auto pair = sequence.at(i);
absl::StrAppendFormat(&output, "%d", i);
absl::StrAppendFormat(&output, " :%s", pair.first);
if (pair.second != nullptr) {
absl::StrAppendFormat(&output, " - %s{%s}\n",
pair.second->instruction()->name(),
pair.second->index().ToString());
}
}
return output;
};
EXPECT_EQ(expected, actual_calls_) << "Expected:\n"
<< to_string(expected) << " \nActual:\n"
<< to_string(actual_calls_) << "\n";
}
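  // Checks that the buffers at the two (instruction, index) pairs were
  // assigned the same offset, i.e. that they share memory.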
void ExpectSharedBuffers(const HloInstruction* instruction_a,
const ShapeIndex& index_a,
const HloInstruction* instruction_b,
const ShapeIndex& index_b) {
int64_t offset_a = OffsetAt(instruction_a, index_a);
int64_t offset_b = OffsetAt(instruction_b, index_b);
EXPECT_EQ(offset_a, offset_b);
}
private:
void Init(const std::vector<HloInstruction*>& instruction_sequence,
const HloDataflowAnalysis::CanShareBuffer& can_share_buffer) {
auto zero_size = [](const BufferValue& buffer) { return 0; };
auto algorithm = std::make_unique<HeapCallRecorder>(&actual_calls_);
alias_analysis_ =
HloAliasAnalysis::Run(module_.get(), can_share_buffer).value();
HeapSimulator::Options options;
result_ =
HeapSimulator::Run(std::move(algorithm), *module_->entry_computation(),
HloInstructionSequence(instruction_sequence),
*alias_analysis_, zero_size, options)
.value();
}
std::unique_ptr<HloModule> module_;
std::unique_ptr<HloAliasAnalysis> alias_analysis_;
CallSequence actual_calls_;
HeapSimulator::Result<HloValue> result_;
};
class HeapSimulatorTest : public HloTestBase {
protected:
HeapSimulatorTest() {}
~HeapSimulatorTest() override {}
Shape f32scalar_ = ShapeUtil::MakeShape(xla::F32, {});
Shape f32vec4_ = ShapeUtil::MakeShape(F32, {4});
};
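// A lone constant is not allocated by the heap simulator, so the only
// expected call is Finish.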
TEST_F(HeapSimulatorTest, ScalarConstant) {
auto builder = HloComputation::Builder(TestName());
auto const0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
HeapSimulatorTracker tracker(TestName(), builder.Build(), {const0});
tracker.ExpectCallSequence({{kFinish, nullptr}});
}
TEST_F(HeapSimulatorTest, OneParam) {
auto builder = HloComputation::Builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32scalar_, "param0"));
HeapSimulatorTracker tracker(TestName(), builder.Build(), {param0});
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(param0, {})},
{kFree, tracker.BufferAt(param0, {})},
{kFinish, nullptr},
});
}
TEST_F(HeapSimulatorTest, Multiply) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32scalar_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
HeapSimulatorTracker tracker(TestName(), builder.Build(),
{paramA, paramX, mul});
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(paramA, {})},
{kAlloc, tracker.BufferAt(paramX, {})},
{kAlloc, tracker.BufferAt(mul, {})},
{kFree, tracker.BufferAt(paramA, {})},
{kFree, tracker.BufferAt(paramX, {})},
{kFree, tracker.BufferAt(mul, {})},
{kFinish, nullptr},
});
}
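// The output of the add reuses the buffer of the multiply: mul is freed and
// immediately shared with add, and both end up at the same offset.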
TEST_F(HeapSimulatorTest, MultiplyAdd) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32scalar_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto paramY = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec4_, "paramY"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, mul, paramY));
HeapSimulatorTracker tracker(TestName(), builder.Build(),
{paramA, paramX, mul, paramY, add});
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(paramA, {})},
{kAlloc, tracker.BufferAt(paramX, {})},
{kAlloc, tracker.BufferAt(paramY, {})},
{kAlloc, tracker.BufferAt(mul, {})},
{kFree, tracker.BufferAt(mul, {})},
{kShare, tracker.BufferAt(add, {})},
{kFree, tracker.BufferAt(paramA, {})},
{kFree, tracker.BufferAt(paramX, {})},
{kFree, tracker.BufferAt(paramY, {})},
{kFree, tracker.BufferAt(add, {})},
{kFinish, nullptr},
});
tracker.ExpectSharedBuffers(add, {}, mul, {});
}
TEST_F(HeapSimulatorTest, FusionOutputsOnlyShareOnce) {
auto can_share_buffer =
[](const HloInstruction* instr, const HloInstruction* operand,
const ShapeIndex& user_index) -> std::optional<bool> {
return instr->opcode() == HloOpcode::kFusion &&
operand->shape().IsArray() &&
ShapeUtil::Equal(operand->shape(),
ShapeUtil::GetSubshape(instr->shape(), user_index));
};
HloModuleConfig config;
auto module = std::make_unique<HloModule>(TestName(), config);
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4_, "paramA"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec4_, HloOpcode::kNegate, paramA));
auto fusion_builder = HloComputation::Builder("simple_two_way_forwarding");
{
auto param = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4_, "x"));
fusion_builder.AddInstruction(HloInstruction::CreateTuple({param, param}));
}
auto fusion_computation =
module->AddEmbeddedComputation(fusion_builder.Build());
auto fusion = builder.AddInstruction(HloInstruction::CreateFusion(
ShapeUtil::MakeTupleShape({f32vec4_, f32vec4_}),
HloInstruction::FusionKind::kLoop, {negate}, fusion_computation));
auto element0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32scalar_, fusion, 0));
auto element1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32scalar_, fusion, 1));
auto negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec4_, HloOpcode::kNegate, element0));
auto negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec4_, HloOpcode::kNegate, element1));
builder.AddInstruction(HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd,
negate0, negate1));
module->AddEntryComputation(builder.Build());
HeapSimulatorTracker tracker(
std::move(module),
{paramA, negate, fusion, element0, element1, negate0, negate1}, {},
can_share_buffer);
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(paramA, {})},
{kAlloc, tracker.BufferAt(negate, {})},
{kAlloc, tracker.BufferAt(fusion, {})},
{kFree, tracker.BufferAt(negate, {})},
{kShare, tracker.BufferAt(fusion, {0})},
{kAlloc, tracker.BufferAt(fusion, {1})},
{kFree, tracker.BufferAt(fusion, {})},
{kAlloc, tracker.BufferAt(negate0, {})},
{kFree, tracker.BufferAt(fusion, {0})},
{kFree, tracker.BufferAt(negate0, {})},
{kAlloc, tracker.BufferAt(negate1, {})},
{kFree, tracker.BufferAt(fusion, {1})},
{kFree, tracker.BufferAt(negate1, {})},
{kFree, tracker.BufferAt(paramA, {})},
{kFinish, nullptr},
});
}
TEST_F(HeapSimulatorTest, FusionOutputsOnlyShareOnceOutputShortLived) {
auto can_share_buffer =
[](const HloInstruction* instr, const HloInstruction* operand,
const ShapeIndex& user_index) -> std::optional<bool> {
if (instr->opcode() == HloOpcode::kFusion) {
return true;
}
return false;
};
HloModuleConfig config;
auto module = std::make_unique<HloModule>(TestName(), config);
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4_, "paramA"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec4_, HloOpcode::kNegate, paramA));
auto fusion_builder = HloComputation::Builder("simple_two_way_forwarding");
{
auto param = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4_, "x"));
fusion_builder.AddInstruction(HloInstruction::CreateTuple({param, param}));
}
auto fusion_computation =
module->AddEmbeddedComputation(fusion_builder.Build());
auto fusion = builder.AddInstruction(HloInstruction::CreateFusion(
ShapeUtil::MakeTupleShape({f32vec4_, f32vec4_}),
HloInstruction::FusionKind::kLoop, {negate}, fusion_computation));
auto element1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32scalar_, fusion, 1));
auto negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec4_, HloOpcode::kNegate, element1));
module->AddEntryComputation(builder.Build());
HeapSimulatorTracker tracker(std::move(module),
{paramA, negate, fusion, element1, negate1}, {},
can_share_buffer);
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(paramA, {})},
{kAlloc, tracker.BufferAt(negate, {})},
{kFree, tracker.BufferAt(negate, {})},
{kShare, tracker.BufferAt(fusion, {0})},
{kAlloc, tracker.BufferAt(fusion, {})},
{kAlloc, tracker.BufferAt(fusion, {1})},
{kFree, tracker.BufferAt(fusion, {0})},
{kFree, tracker.BufferAt(fusion, {})},
{kAlloc, tracker.BufferAt(negate1, {})},
{kFree, tracker.BufferAt(fusion, {1})},
{kFree, tracker.BufferAt(paramA, {})},
{kFree, tracker.BufferAt(negate1, {})},
{kFinish, nullptr},
});
}
TEST_F(HeapSimulatorTest, BufferReusedOnce) {
HeapSimulatorTracker tracker(TestName());
auto builder = HloComputation::Builder(TestName());
HloComputation::Builder fusion_builder("fusion");
{
HloComputation::Builder& builder = fusion_builder;
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, f32vec4_, "A"));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec4_, HloOpcode::kExp, a_param));
auto neg = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec4_, HloOpcode::kNegate, a_param));
builder.AddInstruction(HloInstruction::CreateTuple({exp, neg}));
}
auto fusion_computation =
tracker.module()->AddEmbeddedComputation(fusion_builder.Build());
auto a_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4_, "paramA"));
auto neg = builder.AddInstruction(
HloInstruction::CreateUnary(f32vec4_, HloOpcode::kNegate, a_param));
auto fusion = builder.AddInstruction(HloInstruction::CreateFusion(
ShapeUtil::MakeTupleShape({f32vec4_, f32vec4_}),
HloInstruction::FusionKind::kLoop, {neg}, fusion_computation));
tracker.module()->AddEntryComputation(builder.Build());
tracker.RunWholeModule({a_param, neg, fusion});
auto neg_buffer = tracker.OffsetAt(neg, {});
int64_t output_buffer_0 = tracker.OffsetAt(fusion, {0});
int64_t output_buffer_1 = tracker.OffsetAt(fusion, {1});
EXPECT_TRUE((neg_buffer == output_buffer_0) ^
(neg_buffer == output_buffer_1));
}
TEST_F(HeapSimulatorTest, MultiplyDot) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32scalar_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto paramY = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32scalar_, "paramY"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
f32vec4_, mul, paramY, dot_dnums, DefaultPrecisionConfig(2)));
HeapSimulatorTracker tracker(TestName(), builder.Build(),
{paramA, paramX, mul, paramY, dot});
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(paramA, {})},
{kAlloc, tracker.BufferAt(paramX, {})},
{kAlloc, tracker.BufferAt(paramY, {})},
{kAlloc, tracker.BufferAt(mul, {})},
{kAlloc, tracker.BufferAt(dot, {})},
{kFree, tracker.BufferAt(mul, {})},
{kFree, tracker.BufferAt(paramA, {})},
{kFree, tracker.BufferAt(paramX, {})},
{kFree, tracker.BufferAt(paramY, {})},
{kFree, tracker.BufferAt(dot, {})},
{kFinish, nullptr},
});
}
TEST_F(HeapSimulatorTest, MultiplyDotAdd) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32scalar_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto paramY = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32scalar_, "paramY"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
f32vec4_, mul, paramY, dot_dnums, DefaultPrecisionConfig(2)));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, dot, paramA));
HeapSimulatorTracker tracker(TestName(), builder.Build(),
{paramA, paramX, mul, paramY, dot, add});
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(paramA, {})},
{kAlloc, tracker.BufferAt(paramX, {})},
{kAlloc, tracker.BufferAt(paramY, {})},
{kAlloc, tracker.BufferAt(mul, {})},
{kAlloc, tracker.BufferAt(dot, {})},
{kFree, tracker.BufferAt(mul, {})},
{kFree, tracker.BufferAt(dot, {})},
{kShare, tracker.BufferAt(add, {})},
{kFree, tracker.BufferAt(paramA, {})},
{kFree, tracker.BufferAt(paramX, {})},
{kFree, tracker.BufferAt(paramY, {})},
{kFree, tracker.BufferAt(add, {})},
{kFinish, nullptr},
});
tracker.ExpectSharedBuffers(add, {}, dot, {});
}
TEST_F(HeapSimulatorTest, MultiplyDotDot) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32scalar_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto paramY = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32scalar_, "paramY"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot0 = builder.AddInstruction(HloInstruction::CreateDot(
f32vec4_, mul, paramY, dot_dnums, DefaultPrecisionConfig(2)));
auto dot1 = builder.AddInstruction(HloInstruction::CreateDot(
f32vec4_, dot0, paramY, dot_dnums, DefaultPrecisionConfig(2)));
HeapSimulatorTracker tracker(TestName(), builder.Build(),
{paramA, paramX, mul, paramY, dot0, dot1});
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(paramA, {})},
{kAlloc, tracker.BufferAt(paramX, {})},
{kAlloc, tracker.BufferAt(paramY, {})},
{kAlloc, tracker.BufferAt(mul, {})},
{kAlloc, tracker.BufferAt(dot0, {})},
{kFree, tracker.BufferAt(mul, {})},
{kAlloc, tracker.BufferAt(dot1, {})},
{kFree, tracker.BufferAt(dot0, {})},
{kFree, tracker.BufferAt(paramA, {})},
{kFree, tracker.BufferAt(paramX, {})},
{kFree, tracker.BufferAt(paramY, {})},
{kFree, tracker.BufferAt(dot1, {})},
{kFinish, nullptr},
});
}
TEST_F(HeapSimulatorTest, MultiplyDotDotTuple) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32scalar_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto paramY = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32scalar_, "paramY"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot0 = builder.AddInstruction(HloInstruction::CreateDot(
f32vec4_, mul, paramY, dot_dnums, DefaultPrecisionConfig(2)));
auto dot1 = builder.AddInstruction(HloInstruction::CreateDot(
f32vec4_, dot0, paramY, dot_dnums, DefaultPrecisionConfig(2)));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({dot0, dot1}));
HeapSimulatorTracker tracker(
TestName(), builder.Build(),
{paramA, paramX, mul, paramY, dot0, dot1, tuple});
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(paramA, {})},
{kAlloc, tracker.BufferAt(paramX, {})},
{kAlloc, tracker.BufferAt(paramY, {})},
{kAlloc, tracker.BufferAt(mul, {})},
{kAlloc, tracker.BufferAt(dot0, {})},
{kFree, tracker.BufferAt(mul, {})},
{kAlloc, tracker.BufferAt(dot1, {})},
{kAlloc, tracker.BufferAt(tuple, {})},
{kFree, tracker.BufferAt(paramA, {})},
{kFree, tracker.BufferAt(paramX, {})},
{kFree, tracker.BufferAt(paramY, {})},
{kFree, tracker.BufferAt(dot0, {})},
{kFree, tracker.BufferAt(dot1, {})},
{kFree, tracker.BufferAt(tuple, {})},
{kFinish, nullptr},
});
}
TEST_F(HeapSimulatorTest, IndependentTupleElements) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32scalar_, "paramA"));
auto paramB = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32scalar_, "paramB"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32scalar_, HloOpcode::kMultiply, paramA, paramB));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
f32scalar_, HloOpcode::kAdd, paramA, paramB));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({mul, add}));
auto element0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32scalar_, tuple, 0));
auto broadcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(f32vec4_, element0, {0}));
auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
f32scalar_, HloOpcode::kSubtract, paramA, paramB));
auto element1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32scalar_, tuple, 1));
auto output = builder.AddInstruction(
HloInstruction::CreateTuple({broadcast, sub, element1}));
HeapSimulatorTracker tracker(TestName(), builder.Build(),
{paramA, paramB, mul, add, tuple, element0,
broadcast, sub, element1, output});
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(paramA, {})},
{kAlloc, tracker.BufferAt(paramB, {})},
{kAlloc, tracker.BufferAt(mul, {})},
{kAlloc, tracker.BufferAt(add, {})},
{kAlloc, tracker.BufferAt(tuple, {})},
{kAlloc, tracker.BufferAt(broadcast, {})},
{kFree, tracker.BufferAt(mul, {})},
{kAlloc, tracker.BufferAt(sub, {})},
{kFree, tracker.BufferAt(tuple, {})},
{kAlloc, tracker.BufferAt(output, {})},
{kFree, tracker.BufferAt(paramA, {})},
{kFree, tracker.BufferAt(paramB, {})},
{kFree, tracker.BufferAt(add, {})},
{kFree, tracker.BufferAt(broadcast, {})},
{kFree, tracker.BufferAt(sub, {})},
{kFree, tracker.BufferAt(output, {})},
{kFinish, nullptr},
});
}
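// Exercises RunWholeModule on a module with a while loop spanning the entry,
// condition, and body computations.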
TEST_F(HeapSimulatorTest, WholeModule) {
HeapSimulatorTracker tracker(TestName());
const Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
const Shape tuple_shape =
ShapeUtil::MakeTupleShape({scalar_shape, scalar_shape});
auto cond_builder = HloComputation::Builder("WhileCond");
HloInstruction* cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "cond_param"));
HloInstruction* cond_iter = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, cond_param, 0));
HloInstruction* cond_data = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, cond_param, 1));
HloInstruction* cond_lt = cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), cond_iter,
cond_data, ComparisonDirection::kLt));
HloComputation* cond_computation =
tracker.module()->AddEmbeddedComputation(cond_builder.Build());
auto body_builder = HloComputation::Builder("WhileBody");
HloInstruction* body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "body_param"));
HloComputation* body_computation =
tracker.module()->AddEmbeddedComputation(body_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
HloInstruction* while_op = builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, cond_computation, body_computation, param));
tracker.module()->AddEntryComputation(builder.Build());
tracker.RunWholeModule(
{param, while_op, body_param, cond_param, cond_iter, cond_data, cond_lt});
tracker.ExpectCallSequence({
{kAlloc, tracker.BufferAt(param, {})},
{kAlloc, tracker.BufferAt(param, {0})},
{kAlloc, tracker.BufferAt(param, {1})},
{kAlloc, tracker.BufferAt(cond_lt, {})},
{kFree, tracker.BufferAt(cond_lt, {})},
{kFree, tracker.BufferAt(param, {})},
{kFree, tracker.BufferAt(param, {0})},
{kFree, tracker.BufferAt(param, {1})},
{kFinish, nullptr},
});
}
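// Verifies that the dynamic-update-slice inside the async called computation
// is given a 32-byte chunk, i.e. the byte size of the caller-side f32[8]
// value rather than the 16 bytes of its nominal f32[4] shape.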
TEST_F(HeapSimulatorTest, AsyncCallImplicitSharding) {
std::string hlo_string = R"(
HloModule module, is_scheduled=true
called_computation {
param0 = f32[4] parameter(0)
constant = f32[1] constant(1)
dynamic-update-slice = f32[4] dynamic-update-slice(param0, constant, constant)
ROOT negate = f32[4] negate(dynamic-update-slice)
}
ENTRY entry {
p0 = f32[8] parameter(0)
call-start = ((f32[8]), f32[8], s32[]) call-start(p0), async_execution_thread="foo", to_apply=called_computation
ROOT call-done = f32[8] call-done(call-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(auto alias_analysis,
HloAliasAnalysis::Run(module.get()));
auto size_fn = [](const BufferValue& buffer) -> int64_t {
const Shape& shape = buffer.shape();
if (!shape.IsArray()) {
return 0;
}
return ShapeUtil::ByteSizeOf(shape);
};
auto algorithm = std::make_unique<GlobalDecreasingSizeBestFitHeap<HloValue>>(
1);
HeapSimulator::Result<HloValue> result =
HeapSimulator::Run(std::move(algorithm), *module, module->schedule(),
*alias_analysis, size_fn)
.value();
for (const auto& [value, chunk] : result.heap_results[0].chunk_map) {
if (value->instruction()->name() == "dynamic-update-slice") {
EXPECT_EQ(chunk.size, 32);
}
}
}
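// Base fixture for testing heap algorithms directly. It provides a set of
// dummy HloValues (buffer_a_ through buffer_i_) that can be passed to
// Alloc/Free/ShareWith without running a real HLO module.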
class HeapAlgorithmTestBase : public ::testing::Test {
protected:
HeapAlgorithmTestBase() : builder_("heap_simulator_test") {
buffer_a_ = DummyBufferValue();
buffer_b_ = DummyBufferValue();
buffer_c_ = DummyBufferValue();
buffer_d_ = DummyBufferValue();
buffer_e_ = DummyBufferValue();
buffer_f_ = DummyBufferValue();
buffer_g_ = DummyBufferValue();
buffer_h_ = DummyBufferValue();
buffer_i_ = DummyBufferValue();
}
~HeapAlgorithmTestBase() override {}
const HloValue* buffer_a_;
const HloValue* buffer_b_;
const HloValue* buffer_c_;
const HloValue* buffer_d_;
const HloValue* buffer_e_;
const HloValue* buffer_f_;
const HloValue* buffer_g_;
const HloValue* buffer_h_;
const HloValue* buffer_i_;
private:
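  // Creates a fresh dummy HloValue, backed by a throwaway constant
  // instruction, for use as a buffer in the tests.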
const HloValue* DummyBufferValue() {
const HloValue::Id id = buffers_.size();
auto const0 = builder_.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
buffers_.emplace_back(std::make_unique<HloValue>(id, const0, ShapeIndex{}));
return buffers_.back().get();
}
HloComputation::Builder builder_;
std::vector<std::unique_ptr<HloValue>> buffers_;
};
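// The NoFragmentationStatsHeap tests below only check the resulting
// heap_size, which is the peak amount of simultaneously live memory.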
class NoFragmentationStatsHeapTest : public HeapAlgorithmTestBase {};
TEST_F(NoFragmentationStatsHeapTest, Empty) {
NoFragmentationStatsHeap<HloValue> heap;
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> result,
heap.Finish());
EXPECT_EQ(0, result.heap_size);
}
TEST_F(NoFragmentationStatsHeapTest, Simple) {
NoFragmentationStatsHeap<HloValue> heap;
heap.Alloc(buffer_a_, 10);
heap.Alloc(buffer_b_, 20);
heap.Alloc(buffer_c_, 30);
heap.Alloc(buffer_d_, 30);
heap.Free(buffer_a_, 10);
heap.Free(buffer_b_, 20);
heap.Free(buffer_c_, 30);
heap.Free(buffer_d_, 30);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> result,
heap.Finish());
EXPECT_EQ(90, result.heap_size);
}
TEST_F(NoFragmentationStatsHeapTest, Mixed) {
NoFragmentationStatsHeap<HloValue> heap;
heap.Alloc(buffer_a_, 10);
heap.Alloc(buffer_b_, 20);
heap.Free(buffer_b_, 20);
heap.Alloc(buffer_c_, 30);
heap.Free(buffer_c_, 30);
heap.Alloc(buffer_d_, 5);
heap.Free(buffer_d_, 5);
heap.Free(buffer_a_, 10);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> result,
heap.Finish());
EXPECT_EQ(40, result.heap_size);
}
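// The GlobalDecreasingSizeBestFitHeap tests check both the total heap size
// and the per-buffer chunk offsets and sizes.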
class GlobalDecreasingSizeBestFitHeapTest : public HeapAlgorithmTestBase {};
TEST_F(GlobalDecreasingSizeBestFitHeapTest, Empty) {
GlobalDecreasingSizeBestFitHeap<HloValue> heap(1);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> result,
heap.Finish());
EXPECT_EQ(0, result.heap_size);
EXPECT_EQ(1, result.heap_results.size());
EXPECT_EQ(0, result.heap_results.at(0).chunk_map.size());
}
TEST_F(GlobalDecreasingSizeBestFitHeapTest, DecreasingSize) {
GlobalDecreasingSizeBestFitHeap<HloValue> heap(1);
heap.Alloc(buffer_a_, 10);
heap.Alloc(buffer_b_, 30);
heap.Alloc(buffer_c_, 20);
heap.Alloc(buffer_d_, 40);
heap.Free(buffer_a_, 10);
heap.Free(buffer_b_, 30);
heap.Free(buffer_c_, 20);
heap.Free(buffer_d_, 40);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> results,
heap.Finish());
EXPECT_EQ(1, results.heap_results.size());
const HeapSimulator::HeapResult<HloValue>& result =
results.heap_results.at(0);
EXPECT_EQ(100, result.heap_size);
EXPECT_EQ(10, result.chunk_map.at(buffer_a_).size);
EXPECT_EQ(30, result.chunk_map.at(buffer_b_).size);
EXPECT_EQ(20, result.chunk_map.at(buffer_c_).size);
EXPECT_EQ(40, result.chunk_map.at(buffer_d_).size);
EXPECT_EQ(90, result.chunk_map.at(buffer_a_).offset);
EXPECT_EQ(40, result.chunk_map.at(buffer_b_).offset);
EXPECT_EQ(70, result.chunk_map.at(buffer_c_).offset);
EXPECT_EQ(0, result.chunk_map.at(buffer_d_).offset);
}
TEST_F(GlobalDecreasingSizeBestFitHeapTest, DecreasingSizeWithAlignment) {
GlobalDecreasingSizeBestFitHeap<HloValue> heap(20);
heap.Alloc(buffer_a_, 10);
heap.Alloc(buffer_b_, 20);
heap.Alloc(buffer_c_, 50);
heap.Free(buffer_a_, 10);
heap.Alloc(buffer_d_, 40);
heap.Free(buffer_b_, 20);
heap.Free(buffer_c_, 50);
heap.Free(buffer_d_, 40);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> results,
heap.Finish());
EXPECT_EQ(1, results.heap_results.size());
const HeapSimulator::HeapResult<HloValue>& result =
results.heap_results.at(0);
EXPECT_EQ(120, result.heap_size);
EXPECT_EQ(10, result.chunk_map.at(buffer_a_).size);
EXPECT_EQ(20, result.chunk_map.at(buffer_b_).size);
EXPECT_EQ(50, result.chunk_map.at(buffer_c_).size);
EXPECT_EQ(40, result.chunk_map.at(buffer_d_).size);
EXPECT_EQ(60, result.chunk_map.at(buffer_a_).offset);
EXPECT_EQ(100, result.chunk_map.at(buffer_b_).offset);
EXPECT_EQ(0, result.chunk_map.at(buffer_c_).offset);
EXPECT_EQ(60, result.chunk_map.at(buffer_d_).offset);
}
TEST_F(GlobalDecreasingSizeBestFitHeapTest, BestFit) {
GlobalDecreasingSizeBestFitHeap<HloValue> heap(1);
heap.Alloc(buffer_a_, 10);
heap.Alloc(buffer_b_, 20);
heap.Alloc(buffer_c_, 40);
heap.Free(buffer_a_, 10);
heap.Alloc(buffer_d_, 30);
heap.Alloc(buffer_e_, 50);
heap.Free(buffer_b_, 20);
heap.Free(buffer_c_, 40);
heap.Free(buffer_d_, 30);
heap.Free(buffer_e_, 50);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> results,
heap.Finish());
EXPECT_EQ(1, results.heap_results.size());
const HeapSimulator::HeapResult<HloValue>& result =
results.heap_results.at(0);
EXPECT_EQ(140, result.heap_size);
EXPECT_EQ(10, result.chunk_map.at(buffer_a_).size);
EXPECT_EQ(20, result.chunk_map.at(buffer_b_).size);
EXPECT_EQ(40, result.chunk_map.at(buffer_c_).size);
EXPECT_EQ(30, result.chunk_map.at(buffer_d_).size);
EXPECT_EQ(50, result.chunk_map.at(buffer_e_).size);
EXPECT_EQ(90, result.chunk_map.at(buffer_a_).offset);
EXPECT_EQ(120, result.chunk_map.at(buffer_b_).offset);
EXPECT_EQ(50, result.chunk_map.at(buffer_c_).offset);
EXPECT_EQ(90, result.chunk_map.at(buffer_d_).offset);
EXPECT_EQ(0, result.chunk_map.at(buffer_e_).offset);
}
TEST_F(GlobalDecreasingSizeBestFitHeapTest, Colocated) {
GlobalDecreasingSizeBestFitHeap<HloValue> heap(1);
heap.Alloc(buffer_a_, 40);
heap.Free(buffer_a_, 40);
heap.Alloc(buffer_b_, 20);
heap.Free(buffer_b_, 20);
heap.ShareWith(buffer_c_, buffer_a_, 40);
heap.Free(buffer_c_, 40);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> results,
heap.Finish());
EXPECT_EQ(1, results.heap_results.size());
const HeapSimulator::HeapResult<HloValue>& result =
results.heap_results.at(0);
EXPECT_EQ(40, result.heap_size);
EXPECT_EQ(40, result.chunk_map.at(buffer_a_).size);
EXPECT_EQ(20, result.chunk_map.at(buffer_b_).size);
EXPECT_EQ(40, result.chunk_map.at(buffer_c_).size);
EXPECT_EQ(0, result.chunk_map.at(buffer_a_).offset);
EXPECT_EQ(0, result.chunk_map.at(buffer_b_).offset);
EXPECT_EQ(0, result.chunk_map.at(buffer_c_).offset);
}
TEST_F(GlobalDecreasingSizeBestFitHeapTest, ColocatedII) {
GlobalDecreasingSizeBestFitHeap<HloValue> heap(1);
heap.Alloc(buffer_a_, 40);
heap.Free(buffer_a_, 40);
heap.Alloc(buffer_b_, 20);
heap.ShareWith(buffer_c_, buffer_a_, 40);
heap.Free(buffer_c_, 40);
heap.Free(buffer_b_, 20);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> results,
heap.Finish());
EXPECT_EQ(1, results.heap_results.size());
const HeapSimulator::HeapResult<HloValue>& result =
results.heap_results.at(0);
EXPECT_EQ(60, result.heap_size);
EXPECT_EQ(40, result.chunk_map.at(buffer_a_).size);
EXPECT_EQ(20, result.chunk_map.at(buffer_b_).size);
EXPECT_EQ(40, result.chunk_map.at(buffer_c_).size);
EXPECT_EQ(0, result.chunk_map.at(buffer_a_).offset);
EXPECT_EQ(40, result.chunk_map.at(buffer_b_).offset);
EXPECT_EQ(0, result.chunk_map.at(buffer_c_).offset);
}
TEST_F(GlobalDecreasingSizeBestFitHeapTest, ColocatedIII) {
GlobalDecreasingSizeBestFitHeap<HloValue> heap(1);
heap.Alloc(buffer_a_, 10);
heap.Free(buffer_a_, 10);
heap.Alloc(buffer_b_, 30);
heap.ShareWith(buffer_c_, buffer_a_, 10);
heap.Free(buffer_c_, 10);
heap.Free(buffer_b_, 30);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> results,
heap.Finish());
EXPECT_EQ(1, results.heap_results.size());
const HeapSimulator::HeapResult<HloValue>& result =
results.heap_results.at(0);
EXPECT_EQ(40, result.heap_size);
EXPECT_EQ(10, result.chunk_map.at(buffer_a_).size);
EXPECT_EQ(30, result.chunk_map.at(buffer_b_).size);
EXPECT_EQ(10, result.chunk_map.at(buffer_c_).size);
EXPECT_EQ(30, result.chunk_map.at(buffer_a_).offset);
EXPECT_EQ(0, result.chunk_map.at(buffer_b_).offset);
EXPECT_EQ(30, result.chunk_map.at(buffer_c_).offset);
}
TEST_F(GlobalDecreasingSizeBestFitHeapTest, ColocatedDifferentSize1) {
GlobalDecreasingSizeBestFitHeap<HloValue> heap(1);
heap.Alloc(buffer_a_, 40);
heap.Free(buffer_a_, 40);
heap.Alloc(buffer_b_, 20);
heap.ShareWith(buffer_c_, buffer_a_, 30);
heap.Free(buffer_c_, 30);
heap.Free(buffer_b_, 20);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> results,
heap.Finish());
EXPECT_EQ(1, results.heap_results.size());
const HeapSimulator::HeapResult<HloValue>& result =
results.heap_results.at(0);
EXPECT_EQ(50, result.heap_size);
EXPECT_EQ(40, result.chunk_map.at(buffer_a_).size);
EXPECT_EQ(20, result.chunk_map.at(buffer_b_).size);
EXPECT_EQ(30, result.chunk_map.at(buffer_c_).size);
EXPECT_EQ(0, result.chunk_map.at(buffer_a_).offset);
EXPECT_EQ(30, result.chunk_map.at(buffer_b_).offset);
EXPECT_EQ(0, result.chunk_map.at(buffer_c_).offset);
}
TEST_F(GlobalDecreasingSizeBestFitHeapTest, ColocatedDifferentSize2) {
GlobalDecreasingSizeBestFitHeap<HloValue> heap(1);
heap.Alloc(buffer_a_, 40);
heap.Free(buffer_a_, 40);
heap.Alloc(buffer_b_, 20);
heap.ShareWith(buffer_c_, buffer_a_, 50);
heap.Free(buffer_c_, 50);
heap.Free(buffer_b_, 20);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> results,
heap.Finish());
EXPECT_EQ(1, results.heap_results.size());
const HeapSimulator::HeapResult<HloValue>& result =
results.heap_results.at(0);
EXPECT_EQ(70, result.heap_size);
EXPECT_EQ(40, result.chunk_map.at(buffer_a_).size);
EXPECT_EQ(20, result.chunk_map.at(buffer_b_).size);
EXPECT_EQ(50, result.chunk_map.at(buffer_c_).size);
EXPECT_EQ(0, result.chunk_map.at(buffer_a_).offset);
EXPECT_EQ(50, result.chunk_map.at(buffer_b_).offset);
EXPECT_EQ(0, result.chunk_map.at(buffer_c_).offset);
}
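// Exercises the FindChunkCandidate(s)/CommitChunk interface of
// GlobalDecreasingSizeBestFitHeap through a test-only subclass that exposes
// the protected members.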
class FindGlobalDecreasingSizeBestFitTest : public HeapAlgorithmTestBase {
protected:
class InheritedGlobalDecreasingSizeBestFitHeap
: public GlobalDecreasingSizeBestFitHeap<HloValue> {
public:
InheritedGlobalDecreasingSizeBestFitHeap()
: GlobalDecreasingSizeBestFitHeap(1) {}
std::pair<int64_t, int64_t> MakeFindAndCommit(
const HloValue* buffer, int64_t size, int64_t start, int64_t end,
int64_t preferred_offset = -1) {
MakeBufferInterval(buffer, size, start, end);
BufferInterval* buffer_interval = &GetBufferInterval(buffer);
Chunk chunk_candidate =
FindChunkCandidate(*buffer_interval, preferred_offset);
EXPECT_EQ(chunk_candidate.size, size);
std::pair<int64_t, int64_t> result = std::make_pair(
chunk_candidate.offset, result_.UpdatedHeapSize(chunk_candidate));
CommitChunk(*buffer_interval, chunk_candidate);
return result;
}
void MakeBufferInterval(const HloValue* buffer, int64_t size, int64_t start,
int64_t end) {
BufferInterval* buffer_interval = &buffer_intervals_[buffer];
buffer_interval->buffer = buffer;
buffer_interval->size = size;
buffer_interval->start = start;
buffer_interval->end = end;
}
void AddColocationToBuffer(const HloValue* buffer,
const HloValue* colocation) {
CHECK(buffer_intervals_.contains(buffer));
buffer_intervals_[buffer].colocations.push_back(colocation);
}
BufferInterval& GetBufferInterval(const HloValue* buffer) {
CHECK(buffer_intervals_.contains(buffer));
return buffer_intervals_[buffer];
}
std::vector<Chunk> FindChunkCandidates(
const SlicedBufferInterval& sliced_buffer_interval,
int64_t preferred_offset = -1) const {
return GlobalDecreasingSizeBestFitHeap<HloValue>::FindChunkCandidates(
sliced_buffer_interval, preferred_offset);
}
void CommitChunk(const BufferInterval& buffer_interval, Chunk chunk) {
GlobalDecreasingSizeBestFitHeap<HloValue>::CommitChunk(buffer_interval,
chunk);
}
void AddToChunkMap(const HloValue* buffer, Chunk chunk) override {
committed_[buffer].push_back(chunk);
}
const absl::flat_hash_map<const HloValue*, std::vector<Chunk>>& committed()
const {
return committed_;
}
int64_t heap_size() const { return result_.heap_size; }
private:
absl::flat_hash_map<const HloValue*, std::vector<Chunk>> committed_;
};
using BufferInterval =
InheritedGlobalDecreasingSizeBestFitHeap::BufferInterval;
using SlicedBufferInterval =
InheritedGlobalDecreasingSizeBestFitHeap::SlicedBufferInterval;
using Chunk = InheritedGlobalDecreasingSizeBestFitHeap::Chunk;
InheritedGlobalDecreasingSizeBestFitHeap heap_;
};
TEST_F(FindGlobalDecreasingSizeBestFitTest, ChunkCandidate) {
using pair = std::pair<int64_t, int64_t>;
EXPECT_EQ(pair(5, 10), heap_.MakeFindAndCommit(buffer_a_, 5, 6, 10, 5));
EXPECT_EQ(pair(0, 10), heap_.MakeFindAndCommit(buffer_b_, 10, 3, 5));
EXPECT_EQ(pair(10, 15), heap_.MakeFindAndCommit(buffer_c_, 5, 2, 8));
EXPECT_EQ(pair(0, 15), heap_.MakeFindAndCommit(buffer_d_, 5, 0, 2, 10));
EXPECT_EQ(pair(10, 20), heap_.MakeFindAndCommit(buffer_e_, 10, 11, 13, 10));
EXPECT_EQ(pair(20, 25), heap_.MakeFindAndCommit(buffer_f_, 5, 3, 5, 20));
EXPECT_EQ(pair(25, 35), heap_.MakeFindAndCommit(buffer_g_, 10, 4, 8, 15));
}
TEST_F(FindGlobalDecreasingSizeBestFitTest, FindChunkCandidates) {
{
heap_.MakeBufferInterval(buffer_a_, 10, 5, 15);
auto sliced_buffer_a = SlicedBufferInterval::CreateMutableInterval(
heap_.GetBufferInterval(buffer_a_));
auto chunks = heap_.FindChunkCandidates(sliced_buffer_a);
EXPECT_THAT(chunks, ::testing::ElementsAre(Chunk::FromOffsetSize(0, 10)));
heap_.CommitChunk(sliced_buffer_a.full_buffer_interval(),
Chunk::FromOffsetSize(0, 10));
EXPECT_THAT(
heap_.committed(),
::testing::UnorderedElementsAre(::testing::Pair(
buffer_a_, ::testing::ElementsAre(Chunk::FromOffsetSize(0, 10)))));
EXPECT_EQ(heap_.heap_size(), 10);
}
{
heap_.MakeBufferInterval(buffer_b_, 10, 25, 35);
heap_.MakeBufferInterval(buffer_c_, 15, 10, 20);
heap_.AddColocationToBuffer(buffer_b_, buffer_c_);
auto sliced_buffer_b = SlicedBufferInterval::CreateMutableInterval(
heap_.GetBufferInterval(buffer_b_));
auto sliced_buffer_c = SlicedBufferInterval::CreateMutableInterval(
heap_.GetBufferInterval(buffer_c_));
sliced_buffer_b.Slice({5, 5});
sliced_buffer_b.UpdateInclusiveSliceStartTimes({25, 30});
auto chunks = heap_.FindChunkCandidates(sliced_buffer_b);
EXPECT_THAT(chunks, ::testing::ElementsAre(Chunk::FromOffsetSize(10, 5),
Chunk::FromOffsetSize(15, 5)));
    heap_.CommitChunk(BufferInterval{buffer_b_, 5, 25, 30, {}, true},
                      Chunk::FromOffsetSize(10, 5));
    heap_.CommitChunk(
        BufferInterval{buffer_b_, 10, 30, 35, {buffer_c_}, true},
        Chunk::FromOffsetSize(10, 10));
EXPECT_THAT(
heap_.committed(),
::testing::UnorderedElementsAre(
::testing::Pair(buffer_a_, ::testing::ElementsAre(
Chunk::FromOffsetSize(0, 10))),
::testing::Pair(buffer_b_, ::testing::ElementsAre(
Chunk::FromOffsetSize(10, 5),
Chunk::FromOffsetSize(10, 10))),
::testing::Pair(buffer_c_, ::testing::ElementsAre(
Chunk::FromOffsetSize(10, 15)))));
EXPECT_EQ(heap_.heap_size(), 25);
}
{
heap_.MakeBufferInterval(buffer_d_, 5, 25, 35);
auto sliced_buffer_d = SlicedBufferInterval::CreateMutableInterval(
heap_.GetBufferInterval(buffer_d_));
auto chunks = heap_.FindChunkCandidates(sliced_buffer_d);
EXPECT_THAT(chunks, ::testing::ElementsAre(Chunk::FromOffsetSize(0, 5)));
heap_.CommitChunk(sliced_buffer_d.full_buffer_interval(),
Chunk::FromOffsetSize(0, 5));
EXPECT_THAT(
heap_.committed(),
::testing::UnorderedElementsAre(
::testing::Pair(buffer_a_, ::testing::ElementsAre(
Chunk::FromOffsetSize(0, 10))),
::testing::Pair(buffer_b_, ::testing::ElementsAre(
Chunk::FromOffsetSize(10, 5),
Chunk::FromOffsetSize(10, 10))),
::testing::Pair(buffer_c_, ::testing::ElementsAre(
Chunk::FromOffsetSize(10, 15))),
::testing::Pair(buffer_d_, ::testing::ElementsAre(
Chunk::FromOffsetSize(0, 5)))));
EXPECT_EQ(heap_.heap_size(), 25);
}
{
heap_.MakeBufferInterval(buffer_e_, 10, 30, 35);
auto sliced_buffer_e = SlicedBufferInterval::CreateMutableInterval(
heap_.GetBufferInterval(buffer_e_));
auto chunks = heap_.FindChunkCandidates(sliced_buffer_e);
EXPECT_THAT(chunks, ::testing::ElementsAre(Chunk::FromOffsetSize(20, 10)));
heap_.CommitChunk(sliced_buffer_e.full_buffer_interval(),
Chunk::FromOffsetSize(20, 10));
EXPECT_THAT(
heap_.committed(),
::testing::UnorderedElementsAre(
::testing::Pair(buffer_a_, ::testing::ElementsAre(
Chunk::FromOffsetSize(0, 10))),
::testing::Pair(buffer_b_, ::testing::ElementsAre(
Chunk::FromOffsetSize(10, 5),
Chunk::FromOffsetSize(10, 10))),
::testing::Pair(buffer_c_, ::testing::ElementsAre(
Chunk::FromOffsetSize(10, 15))),
::testing::Pair(
buffer_d_, ::testing::ElementsAre(Chunk::FromOffsetSize(0, 5))),
::testing::Pair(buffer_e_, ::testing::ElementsAre(
Chunk::FromOffsetSize(20, 10)))));
EXPECT_EQ(heap_.heap_size(), 30);
}
{
heap_.MakeBufferInterval(buffer_f_, 10, 25, 29);
auto sliced_buffer_f = SlicedBufferInterval::CreateMutableInterval(
heap_.GetBufferInterval(buffer_f_));
auto chunks = heap_.FindChunkCandidates(sliced_buffer_f);
EXPECT_THAT(chunks, ::testing::ElementsAre(Chunk::FromOffsetSize(15, 10)));
heap_.CommitChunk(sliced_buffer_f.full_buffer_interval(),
Chunk::FromOffsetSize(15, 10));
EXPECT_THAT(
heap_.committed(),
::testing::UnorderedElementsAre(
::testing::Pair(buffer_a_, ::testing::ElementsAre(
Chunk::FromOffsetSize(0, 10))),
::testing::Pair(buffer_b_, ::testing::ElementsAre(
Chunk::FromOffsetSize(10, 5),
Chunk::FromOffsetSize(10, 10))),
::testing::Pair(buffer_c_, ::testing::ElementsAre(
Chunk::FromOffsetSize(10, 15))),
::testing::Pair(
buffer_d_, ::testing::ElementsAre(Chunk::FromOffsetSize(0, 5))),
::testing::Pair(buffer_e_, ::testing::ElementsAre(
Chunk::FromOffsetSize(20, 10))),
::testing::Pair(buffer_f_, ::testing::ElementsAre(
Chunk::FromOffsetSize(15, 10)))));
EXPECT_EQ(heap_.heap_size(), 30);
}
}
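// The constrained heap splits allocations across multiple heaps once a single
// heap would exceed the size limit given as the first constructor argument,
// so the result contains more than one HeapResult.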
class ConstrainedGlobalDecreasingSizeBestFitHeapTest
: public HeapAlgorithmTestBase {};
TEST_F(ConstrainedGlobalDecreasingSizeBestFitHeapTest, DecreasingSize) {
  ConstrainedGlobalDecreasingSizeBestFitHeap heap(50, 1);
heap.Alloc(buffer_a_, 10);
heap.Alloc(buffer_b_, 30);
heap.Alloc(buffer_c_, 20);
heap.Alloc(buffer_d_, 40);
heap.Free(buffer_a_, 10);
heap.Free(buffer_b_, 30);
heap.Free(buffer_c_, 20);
heap.Free(buffer_d_, 40);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> result,
heap.Finish());
EXPECT_EQ(100, result.heap_size);
EXPECT_EQ(2, result.heap_results.size());
EXPECT_TRUE(result.heap_results[0].chunk_map.contains(buffer_a_));
EXPECT_TRUE(result.heap_results[0].chunk_map.contains(buffer_d_));
EXPECT_EQ(10, result.heap_results[0].chunk_map.at(buffer_a_).size);
EXPECT_EQ(40, result.heap_results[0].chunk_map.at(buffer_d_).size);
EXPECT_EQ(40, result.heap_results[0].chunk_map.at(buffer_a_).offset);
EXPECT_EQ(0, result.heap_results[0].chunk_map.at(buffer_d_).offset);
}
TEST_F(ConstrainedGlobalDecreasingSizeBestFitHeapTest,
DecreasingSizeWithAlignment) {
  ConstrainedGlobalDecreasingSizeBestFitHeap heap(70, 20);
heap.Alloc(buffer_a_, 10);
heap.Alloc(buffer_b_, 20);
heap.Alloc(buffer_c_, 50);
heap.Free(buffer_a_, 10);
heap.Alloc(buffer_d_, 40);
heap.Free(buffer_b_, 20);
heap.Free(buffer_c_, 50);
heap.Free(buffer_d_, 40);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> result,
heap.Finish());
EXPECT_EQ(130, result.heap_size);
EXPECT_EQ(2, result.heap_results.size());
EXPECT_TRUE(result.heap_results[0].chunk_map.contains(buffer_a_));
EXPECT_TRUE(result.heap_results[0].chunk_map.contains(buffer_c_));
EXPECT_EQ(10, result.heap_results[0].chunk_map.at(buffer_a_).size);
EXPECT_EQ(50, result.heap_results[0].chunk_map.at(buffer_c_).size);
EXPECT_EQ(60, result.heap_results[0].chunk_map.at(buffer_a_).offset);
EXPECT_EQ(0, result.heap_results[0].chunk_map.at(buffer_c_).offset);
}
TEST_F(ConstrainedGlobalDecreasingSizeBestFitHeapTest, ColocatedII) {
  ConstrainedGlobalDecreasingSizeBestFitHeap heap(50, 20);
heap.Alloc(buffer_a_, 30);
heap.Free(buffer_a_, 30);
heap.Alloc(buffer_b_, 20);
heap.ShareWith(buffer_c_, buffer_a_, 40);
heap.Free(buffer_c_, 40);
heap.Free(buffer_b_, 20);
TF_ASSERT_OK_AND_ASSIGN(const HeapSimulator::Result<HloValue> result,
heap.Finish());
EXPECT_EQ(60, result.heap_size);
EXPECT_EQ(2, result.heap_results.size());
EXPECT_TRUE(result.heap_results[0].chunk_map.contains(buffer_a_));
EXPECT_TRUE(result.heap_results[0].chunk_map.contains(buffer_c_));
EXPECT_EQ(30, result.heap_results[0].chunk_map.at(buffer_a_).size);
EXPECT_EQ(40, result.heap_results[0].chunk_map.at(buffer_c_).size);
EXPECT_EQ(0, result.heap_results[0].chunk_map.at(buffer_a_).offset);
EXPECT_EQ(0, result.heap_results[0].chunk_map.at(buffer_c_).offset);
}
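// BufferIntervalTree tests: insertion and removal must keep the subtree_end
// metadata consistent, and the ASCII-art / memory-usage helpers summarize the
// intervals overlapping a time range.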
class IntervalTreeTest : public ::testing::Test {};
TEST_F(IntervalTreeTest, InsertAndRemove) {
HeapSimulator::Chunk chunk = HeapSimulator::Chunk::FromOffsetSize(1, 2);
BufferIntervalTree tree;
tree.Add(1, 2, chunk);
EXPECT_TRUE(tree.Remove(1, 2, chunk));
EXPECT_FALSE(tree.Remove(1, 2, chunk));
ASSERT_EQ(tree.GetRoot(), nullptr);
tree.Add(1, 2, chunk);
EXPECT_TRUE(tree.Remove(1, 2, chunk));
EXPECT_FALSE(tree.Remove(1, 2, chunk));
ASSERT_EQ(tree.GetRoot(), nullptr);
}
TEST_F(IntervalTreeTest, InsertAndRemoveTwoLevelsLeft) {
  HeapSimulator::Chunk chunk = HeapSimulator::Chunk::FromOffsetSize(1, 2);
BufferIntervalTree tree;
tree.Add(20, 36, chunk);
tree.Add(1, 45, chunk);
EXPECT_TRUE(tree.Remove(1, 45, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 36);
EXPECT_TRUE(tree.Remove(20, 36, chunk));
ASSERT_EQ(tree.GetRoot(), nullptr);
}
TEST_F(IntervalTreeTest, InsertAndRemoveTwoLevelsRight) {
  HeapSimulator::Chunk chunk = HeapSimulator::Chunk::FromOffsetSize(1, 2);
BufferIntervalTree tree;
tree.Add(20, 36, chunk);
tree.Add(21, 45, chunk);
EXPECT_TRUE(tree.Remove(21, 45, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 36);
EXPECT_TRUE(tree.Remove(20, 36, chunk));
ASSERT_EQ(tree.GetRoot(), nullptr);
}
TEST_F(IntervalTreeTest, TwoLevelsRight_RootFirst) {
  HeapSimulator::Chunk chunk = HeapSimulator::Chunk::FromOffsetSize(1, 2);
BufferIntervalTree tree;
tree.Add(20, 36, chunk);
tree.Add(21, 45, chunk);
EXPECT_TRUE(tree.Remove(20, 36, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 45);
EXPECT_EQ(tree.GetRoot()->start, 21);
EXPECT_EQ(tree.GetRoot()->end, 45);
EXPECT_EQ(tree.GetRoot()->left, nullptr);
EXPECT_EQ(tree.GetRoot()->right, nullptr);
EXPECT_TRUE(tree.Remove(21, 45, chunk));
ASSERT_EQ(tree.GetRoot(), nullptr);
}
TEST_F(IntervalTreeTest, TwoLevelsLeft_RootFirst) {
  HeapSimulator::Chunk chunk = HeapSimulator::Chunk::FromOffsetSize(1, 2);
BufferIntervalTree tree;
tree.Add(20, 36, chunk);
tree.Add(1, 45, chunk);
EXPECT_TRUE(tree.Remove(20, 36, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 45);
EXPECT_EQ(tree.GetRoot()->start, 1);
EXPECT_EQ(tree.GetRoot()->end, 45);
EXPECT_EQ(tree.GetRoot()->left, nullptr);
EXPECT_EQ(tree.GetRoot()->right, nullptr);
EXPECT_TRUE(tree.Remove(1, 45, chunk));
ASSERT_EQ(tree.GetRoot(), nullptr);
}
TEST_F(IntervalTreeTest, ThreeLevelsRight) {
  HeapSimulator::Chunk chunk = HeapSimulator::Chunk::FromOffsetSize(1, 2);
BufferIntervalTree tree;
tree.Add(20, 36, chunk);
tree.Add(21, 45, chunk);
tree.Add(22, 40, chunk);
EXPECT_TRUE(tree.Remove(21, 45, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 40);
EXPECT_TRUE(tree.Remove(20, 36, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 40);
EXPECT_TRUE(tree.Remove(22, 40, chunk));
ASSERT_EQ(tree.GetRoot(), nullptr);
}
TEST_F(IntervalTreeTest, ThreeLevelsLeftLeft) {
  HeapSimulator::Chunk chunk = HeapSimulator::Chunk::FromOffsetSize(1, 2);
BufferIntervalTree tree;
tree.Add(20, 36, chunk);
tree.Add(10, 45, chunk);
tree.Add(1, 40, chunk);
EXPECT_TRUE(tree.Remove(10, 45, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 40);
EXPECT_TRUE(tree.Remove(1, 40, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 36);
EXPECT_TRUE(tree.Remove(20, 36, chunk));
ASSERT_EQ(tree.GetRoot(), nullptr);
}
TEST_F(IntervalTreeTest, ThreeLevelsLeftRight) {
  HeapSimulator::Chunk chunk = HeapSimulator::Chunk::FromOffsetSize(1, 2);
BufferIntervalTree tree;
tree.Add(20, 36, chunk);
tree.Add(10, 45, chunk);
tree.Add(15, 40, chunk);
EXPECT_TRUE(tree.Remove(10, 45, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 40);
EXPECT_TRUE(tree.Remove(15, 40, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 36);
EXPECT_TRUE(tree.Remove(20, 36, chunk));
ASSERT_EQ(tree.GetRoot(), nullptr);
}
TEST_F(IntervalTreeTest, ThreeLevelsRightLeft) {
  HeapSimulator::Chunk chunk = HeapSimulator::Chunk::FromOffsetSize(1, 2);
BufferIntervalTree tree;
tree.Add(20, 36, chunk);
tree.Add(25, 45, chunk);
tree.Add(22, 40, chunk);
EXPECT_TRUE(tree.Remove(25, 45, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 40);
EXPECT_TRUE(tree.Remove(20, 36, chunk));
EXPECT_EQ(tree.GetRoot()->subtree_end, 40);
EXPECT_TRUE(tree.Remove(22, 40, chunk));
ASSERT_EQ(tree.GetRoot(), nullptr);
}
TEST_F(IntervalTreeTest, ThreeLevelsRightLeftChunkDifferent) {
HeapSimulator::Chunk chunk1 = HeapSimulator::Chunk::FromOffsetSize(1, 2);
HeapSimulator::Chunk chunk2 = HeapSimulator::Chunk::FromOffsetSize(2, 3);
HeapSimulator::Chunk chunk3 = HeapSimulator::Chunk::FromOffsetSize(3, 4);
BufferIntervalTree tree;
tree.Add(20, 36, chunk1);
tree.Add(25, 45, chunk2);
tree.Add(22, 40, chunk3);
EXPECT_TRUE(tree.Remove(25, 45, chunk2));
EXPECT_EQ(tree.GetRoot()->subtree_end, 40);
EXPECT_EQ(tree.GetRoot()->chunk.offset, 1);
EXPECT_EQ(tree.GetRoot()->chunk.size, 2);
EXPECT_TRUE(tree.Remove(20, 36, chunk1));
EXPECT_EQ(tree.GetRoot()->subtree_end, 40);
EXPECT_EQ(tree.GetRoot()->chunk.offset, 3);
EXPECT_EQ(tree.GetRoot()->chunk.size, 4);
EXPECT_TRUE(tree.Remove(22, 40, chunk3));
ASSERT_EQ(tree.GetRoot(), nullptr);
}
TEST_F(IntervalTreeTest, BufferIntervalTreeToAsciiArt) {
BufferIntervalTree tree;
tree.Add(15, 25, HeapSimulator::Chunk::FromOffsetEnd(0, 16));
tree.Add(15, 19, HeapSimulator::Chunk::FromOffsetEnd(16, 48));
tree.Add(20, 22, HeapSimulator::Chunk::FromOffsetEnd(32, 64));
  std::string output = tree.NodesOverlappingInTimeToAsciiArt(18, 23, 3);
EXPECT_THAT(output, HasSubstr("Memory map for time: [18,23], "
"memory_block_size: 16, group_size: 3"));
EXPECT_THAT(output, HasSubstr("..# ##. 64"));
EXPECT_THAT(output, HasSubstr("### ##. 48"));
EXPECT_THAT(output, HasSubstr("##. ... 32"));
EXPECT_THAT(output, HasSubstr("### ### 16"));
EXPECT_THAT(output, HasSubstr("890 123"));
}
TEST_F(IntervalTreeTest, BufferIntervalTreeToAsciiArtTooLarge) {
BufferIntervalTree tree;
tree.Add(0, 4, HeapSimulator::Chunk::FromOffsetEnd(0, 128));
tree.Add(5, 10, HeapSimulator::Chunk::FromOffsetEnd(1, 129));
  std::string output = tree.NodesOverlappingInTimeToAsciiArt(0, 10, 3);
EXPECT_THAT(
output,
HasSubstr(
"Cannot print memory usage to ASCII art. Printing nodes instead!"));
EXPECT_THAT(output, HasSubstr("start: 0 end: 4 chunk: [0,128)"));
EXPECT_THAT(output, HasSubstr("start: 5 end: 10 chunk: [1,129)"));
}
TEST_F(IntervalTreeTest, BufferIntervalTreeToAsciiArtFreeMemory) {
BufferIntervalTree tree;
tree.Add(5, 10, HeapSimulator::Chunk::FromOffsetEnd(0, 16));
  std::string output = tree.NodesOverlappingInTimeToAsciiArt(0, 4, 10);
EXPECT_THAT(output, StrEq("No nodes overlapping in time. Memory is free!"));
}
TEST_F(IntervalTreeTest, BufferIntervalTreeMemoryUsedInInterval) {
BufferIntervalTree tree;
tree.Add(15, 25, HeapSimulator::Chunk::FromOffsetEnd(0, 16));
tree.Add(15, 19, HeapSimulator::Chunk::FromOffsetEnd(16, 48));
tree.Add(20, 22, HeapSimulator::Chunk::FromOffsetEnd(32, 64));
  std::vector<int64_t> memory_used_by_time = tree.MemoryUsedInInterval(18, 23);
std::vector<int64_t> expected_memory_used_by_time = {48, 48, 48, 48, 48, 16};
EXPECT_THAT(memory_used_by_time, ContainerEq(expected_memory_used_by_time));
}
TEST_F(IntervalTreeTest, BufferIntervalTreeHeapSize) {
BufferIntervalTree tree;
tree.Add(15, 26, HeapSimulator::Chunk::FromOffsetEnd(0, 16));
tree.Add(17, 24, HeapSimulator::Chunk::FromOffsetEnd(16, 48));
tree.Add(20, 22, HeapSimulator::Chunk::FromOffsetEnd(32, 64));
EXPECT_THAT(tree.HeapSizeInInterval(15, 16), 16);
EXPECT_THAT(tree.HeapSizeInInterval(15, 19), 48);
EXPECT_THAT(tree.HeapSizeInInterval(15, 22), 64);
EXPECT_THAT(tree.HeapSizeInInterval(23, 24), 48);
EXPECT_THAT(tree.HeapSizeInInterval(25, 26), 16);
}
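// SlicedBufferInterval tests: slicing a buffer interval and updating the
// per-slice start times changes the intervals used for MakeFreeChunks, while
// the full buffer interval keeps the original colocations.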
class SlicedBufferIntervalTest : public ::testing::Test {
public:
using HeapTy = GlobalDecreasingSizeBestFitHeap<HloValue>;
using ColocationTy = absl::InlinedVector<const HloValue*, 2>;
static std::tuple<const HloValue*, int64_t, int64_t, int64_t,
const ColocationTy&, bool>
BufferIntervalToTuple(const HeapTy::BufferInterval& buffer_interval) {
return std::make_tuple(buffer_interval.buffer, buffer_interval.size,
buffer_interval.start, buffer_interval.end,
std::ref(buffer_interval.colocations),
buffer_interval.need_allocation);
}
SlicedBufferIntervalTest() {
HloModuleConfig config;
module_ = std::make_unique<HloModule>("TestModule", config);
Shape f32vec4 = ShapeUtil::MakeShape(F32, {4});
auto builder = HloComputation::Builder("TestComputation");
auto p0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4, "p0"));
auto p1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4, "p1"));
builder.AddInstruction(
HloInstruction::CreateBinary(f32vec4, HloOpcode::kAdd, p0, p1));
module_->AddEntryComputation(builder.Build());
p0_value_ = std::make_unique<HloValue>(0, p0, ShapeIndex{});
p1_value_ = std::make_unique<HloValue>(0, p1, ShapeIndex{});
full_buffer_interval_ = HeapTy::BufferInterval({
p0_value_.get(),
20,
100,
200,
{p1_value_.get()},
true,
});
sliced_buffer_interval_ = std::make_unique<HeapTy::SlicedBufferInterval>(
HeapTy::SlicedBufferInterval::CreateConstInterval(
full_buffer_interval_));
mutable_sliced_buffer_interval_ =
std::make_unique<HeapTy::SlicedBufferInterval>(
HeapTy::SlicedBufferInterval::CreateMutableInterval(
full_buffer_interval_));
}
protected:
std::unique_ptr<HloModule> module_;
std::unique_ptr<HloValue> p0_value_;
std::unique_ptr<HloValue> p1_value_;
HeapTy::BufferInterval full_buffer_interval_;
std::unique_ptr<const HeapTy::SlicedBufferInterval> sliced_buffer_interval_;
std::unique_ptr<HeapTy::SlicedBufferInterval> mutable_sliced_buffer_interval_;
};
TEST_F(SlicedBufferIntervalTest, NoSlices) {
EXPECT_EQ(
BufferIntervalToTuple(sliced_buffer_interval_->full_buffer_interval()),
BufferIntervalToTuple(full_buffer_interval_));
EXPECT_EQ(sliced_buffer_interval_->num_slices(), 1);
EXPECT_THAT(sliced_buffer_interval_->SliceSizesSortedByOffset(),
::testing::ElementsAre(20));
EXPECT_EQ(BufferIntervalToTuple(
sliced_buffer_interval_->IntervalForMakeFreeChunks(0)),
BufferIntervalToTuple(full_buffer_interval_));
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->full_buffer_interval()),
BufferIntervalToTuple(full_buffer_interval_));
}
TEST_F(SlicedBufferIntervalTest, Sliced) {
std::vector<int64_t> slice_sizes = {4, 5, 5, 6};
mutable_sliced_buffer_interval_->Slice(absl::Span<int64_t>(slice_sizes));
EXPECT_EQ(mutable_sliced_buffer_interval_->num_slices(), 4);
EXPECT_THAT(mutable_sliced_buffer_interval_->SliceSizesSortedByOffset(),
::testing::ElementsAre(4, 5, 5, 6));
mutable_sliced_buffer_interval_->UpdateInclusiveSliceStartTimes(
{100, 125, 150, 175});
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->IntervalForMakeFreeChunks(0)),
BufferIntervalToTuple(
{p0_value_.get(), 4, 100, 124, ColocationTy(), true}));
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->IntervalForMakeFreeChunks(1)),
BufferIntervalToTuple(
{p0_value_.get(), 4, 125, 149, ColocationTy(), true}));
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->IntervalForMakeFreeChunks(2)),
BufferIntervalToTuple(
{p0_value_.get(), 4, 150, 174, ColocationTy(), true}));
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->IntervalForMakeFreeChunks(3)),
BufferIntervalToTuple({p0_value_.get(), 20, 175, 200,
ColocationTy({p1_value_.get()}), true}));
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->full_buffer_interval()),
BufferIntervalToTuple({p0_value_.get(), 20, 100, 200,
ColocationTy({p1_value_.get()}), true}));
mutable_sliced_buffer_interval_->UpdateExclusiveSliceStartTimes(
{100, 125, 150, 175});
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->IntervalForMakeFreeChunks(0)),
BufferIntervalToTuple(
{p0_value_.get(), 4, 101, 125, ColocationTy(), true}));
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->IntervalForMakeFreeChunks(1)),
BufferIntervalToTuple(
{p0_value_.get(), 4, 126, 150, ColocationTy(), true}));
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->IntervalForMakeFreeChunks(2)),
BufferIntervalToTuple(
{p0_value_.get(), 4, 151, 175, ColocationTy(), true}));
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->IntervalForMakeFreeChunks(3)),
BufferIntervalToTuple({p0_value_.get(), 20, 176, 200,
ColocationTy({p1_value_.get()}), true}));
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->full_buffer_interval()),
BufferIntervalToTuple({p0_value_.get(), 20, 101, 200,
ColocationTy({p1_value_.get()}), true}));
mutable_sliced_buffer_interval_->UpdateEndTime(300);
EXPECT_EQ(mutable_sliced_buffer_interval_->IntervalForMakeFreeChunks(2).end,
175);
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->IntervalForMakeFreeChunks(3)),
BufferIntervalToTuple({p0_value_.get(), 20, 176, 300,
ColocationTy({p1_value_.get()}), true}));
EXPECT_EQ(BufferIntervalToTuple(
mutable_sliced_buffer_interval_->full_buffer_interval()),
BufferIntervalToTuple({p0_value_.get(), 20, 101, 300,
ColocationTy({p1_value_.get()}), true}));
}
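// SlicedAllocationFinder tests. Each entry of free_chunks_per_slice_time
// lists the free [start, end) offset ranges available at that slice time; the
// finder returns one chunk per slice plus a final chunk covering any
// remaining colocation space (zero-sized in the common case).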
class SlicedAllocationFinderTest : public ::testing::Test {
public:
using HeapTy = GlobalDecreasingSizeBestFitHeap<HloValue>;
using FreeChunks = typename HeapTy::FreeChunks;
using Chunk = HeapSimulator::Chunk;
using Finder = typename HeapTy::SlicedAllocationFinder;
protected:
std::unique_ptr<SliceTimePermutationIterator> NewPermutationIterator(
int64_t num_slices) {
std::vector<int64_t> inclusive_start_times;
inclusive_start_times.reserve(num_slices);
for (int64_t start_time = 0; start_time < num_slices; ++start_time) {
inclusive_start_times.push_back(start_time);
}
return SliceTimePermutationIterator::CreateForNewAllocation(
SliceTimePermutationIterator::Ty::kAll, inclusive_start_times);
}
};
TEST_F(SlicedAllocationFinderTest, NoSlices) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{45, 48},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(Chunk::FromOffsetSize(45, 3),
Chunk::FromOffsetSize(48, 0)));
}
TEST_F(SlicedAllocationFinderTest, NoSlicesLargerMaxColloc) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{45, 48},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3};
int64_t max_colocation_size = 6;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(Chunk::FromOffsetSize(60, 3),
Chunk::FromOffsetSize(63, 3)));
}
TEST_F(SlicedAllocationFinderTest, NoSlicesSmallestTie) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 13},
{15, 40},
{45, 48},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(Chunk::FromOffsetSize(10, 3),
Chunk::FromOffsetSize(13, 0)));
}
TEST_F(SlicedAllocationFinderTest, LeftHole) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{45, 48},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 51},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(45, 3), Chunk::FromOffsetSize(48, 3),
Chunk::FromOffsetSize(51, 3), Chunk::FromOffsetSize(54, 0)));
}
TEST_F(SlicedAllocationFinderTest, RightHole) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{51, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{48, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(51, 3), Chunk::FromOffsetSize(48, 3),
Chunk::FromOffsetSize(45, 3), Chunk::FromOffsetSize(54, 0)));
}
TEST_F(SlicedAllocationFinderTest, MiddleHole) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{48, 51},
{60, 70},
},
{
{5, 7},
{10, 40},
{48, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(48, 3), Chunk::FromOffsetSize(51, 3),
Chunk::FromOffsetSize(45, 3), Chunk::FromOffsetSize(54, 0)));
}
TEST_F(SlicedAllocationFinderTest, ManyHoles) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 31},
{39, 42},
{46, 51},
{54, 60},
{62, 64},
},
{
{5, 31},
{38, 44},
{46, 51},
{54, 59},
{62, 64},
},
{
{5, 31},
{36, 59},
{62, 64},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(46, 3), Chunk::FromOffsetSize(40, 3),
Chunk::FromOffsetSize(43, 3), Chunk::FromOffsetSize(49, 0)));
}
TEST_F(SlicedAllocationFinderTest, EarlySliceTimesHaveLargeFreeChunks) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{6, 68},
},
{
{5, 25},
{28, 40},
{48, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(48, 3), Chunk::FromOffsetSize(51, 3),
Chunk::FromOffsetSize(45, 3), Chunk::FromOffsetSize(54, 0)));
}
TEST_F(SlicedAllocationFinderTest, DifferentSliceSizes1) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{46, 51},
{60, 70},
},
{
{5, 7},
{10, 40},
{46, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{42, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {5, 3, 4};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(47, 3), Chunk::FromOffsetSize(50, 4),
Chunk::FromOffsetSize(42, 5), Chunk::FromOffsetSize(54, 0)));
}
TEST_F(SlicedAllocationFinderTest, DifferentSliceSizes2) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{46, 49},
{60, 70},
},
{
{5, 7},
{10, 40},
{46, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{42, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {5, 3, 4};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(10, 5), Chunk::FromOffsetSize(15, 3),
Chunk::FromOffsetSize(18, 4), Chunk::FromOffsetSize(22, 0)));
}
TEST_F(SlicedAllocationFinderTest, ZeroSizeFreeChunk) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 5},
{10, 40},
{45, 48},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 51},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 45},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(60, 3), Chunk::FromOffsetSize(63, 3),
Chunk::FromOffsetSize(66, 3), Chunk::FromOffsetSize(69, 0)));
}
TEST_F(SlicedAllocationFinderTest, LargerMaxColloc) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{48, 51},
{60, 70},
},
{
{5, 7},
{10, 40},
{48, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = 10;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(60, 3), Chunk::FromOffsetSize(63, 3),
Chunk::FromOffsetSize(66, 3), Chunk::FromOffsetSize(69, 1)));
}
TEST_F(SlicedAllocationFinderTest, PreferredOffsetFit) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{48, 51},
{60, 70},
},
{
{5, 7},
{10, 40},
{48, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = 20;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(20, 3), Chunk::FromOffsetSize(23, 3),
Chunk::FromOffsetSize(26, 3), Chunk::FromOffsetSize(29, 0)));
}
TEST_F(SlicedAllocationFinderTest, PreferredOffsetNoFit) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{48, 51},
{60, 70},
},
{
{5, 7},
{10, 40},
{48, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = 35;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(48, 3), Chunk::FromOffsetSize(51, 3),
Chunk::FromOffsetSize(45, 3), Chunk::FromOffsetSize(54, 0)));
}
TEST_F(SlicedAllocationFinderTest, Misaligned) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{47, 53},
{60, 70},
},
{
{5, 7},
{10, 40},
{47, 57},
{60, 70},
},
{
{5, 7},
{10, 40},
{43, 57},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {4, 4, 4};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 2;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(48, 4), Chunk::FromOffsetSize(52, 4),
Chunk::FromOffsetSize(44, 4), Chunk::FromOffsetSize(56, 0)));
}
TEST_F(SlicedAllocationFinderTest, PreferredOffsetMisaligned) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{47, 53},
{60, 70},
},
{
{5, 7},
{10, 40},
{47, 57},
{60, 70},
},
{
{5, 7},
{10, 40},
{43, 57},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {4, 4, 4};
int64_t max_colocation_size = -1;
int64_t preferred_offset = 21;
int64_t alignment = 2;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(48, 4), Chunk::FromOffsetSize(52, 4),
Chunk::FromOffsetSize(44, 4), Chunk::FromOffsetSize(56, 0)));
}
TEST_F(SlicedAllocationFinderTest, CorrectInitialization1) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 11},
{15, 21},
},
{
{5, 11},
{25, 31},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(Chunk::FromOffsetSize(5, 3),
Chunk::FromOffsetSize(8, 3),
Chunk::FromOffsetSize(11, 0)));
}
TEST_F(SlicedAllocationFinderTest, CorrectInitialization2) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 16},
{20, 26},
{40, 43},
},
{
{5, 16},
{26, 32},
{42, 45},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(free_chunks_per_slice_time, sorted_slice_sizes,
max_colocation_size, preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()));
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(Chunk::FromOffsetSize(5, 3),
Chunk::FromOffsetSize(8, 3),
Chunk::FromOffsetSize(11, 0)));
}
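// The following tests pass an extra predicate to the Finder constructor that
// restricts which offsets a sliced allocation is permitted to start at (here,
// any offset except 45).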
TEST_F(SlicedAllocationFinderTest, LeftHoleNotAllowedToStartAtFirstOffset) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{45, 49},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 52},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 55},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(
free_chunks_per_slice_time, sorted_slice_sizes, max_colocation_size,
preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()),
[](int64_t offset) { return offset != 45; });
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(46, 3), Chunk::FromOffsetSize(49, 3),
Chunk::FromOffsetSize(52, 3), Chunk::FromOffsetSize(55, 0)));
}
TEST_F(SlicedAllocationFinderTest, LeftHoleAllowedToIncludeNoStartOffset) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{45, 48},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 51},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(
free_chunks_per_slice_time, sorted_slice_sizes, max_colocation_size,
preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()),
[](int64_t offset) { return offset != 46; });
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(45, 3), Chunk::FromOffsetSize(48, 3),
Chunk::FromOffsetSize(51, 3), Chunk::FromOffsetSize(54, 0)));
}
TEST_F(SlicedAllocationFinderTest, RightHoleNotAllowedToStartAtFirstOffset) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{51, 55},
{60, 70},
},
{
{5, 7},
{10, 40},
{48, 55},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 55},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(
free_chunks_per_slice_time, sorted_slice_sizes, max_colocation_size,
preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()),
[](int64_t offset) { return offset != 45; });
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(52, 3), Chunk::FromOffsetSize(49, 3),
Chunk::FromOffsetSize(46, 3), Chunk::FromOffsetSize(55, 0)));
}
TEST_F(SlicedAllocationFinderTest, RightHoleNotAllowedOffsetsFindsNewHole) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{51, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{48, 54},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 54},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(
free_chunks_per_slice_time, sorted_slice_sizes, max_colocation_size,
preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()),
[](int64_t offset) { return offset != 45; });
EXPECT_THAT(finder.Find(),
::testing::ElementsAre(
Chunk::FromOffsetSize(60, 3), Chunk::FromOffsetSize(63, 3),
Chunk::FromOffsetSize(66, 3), Chunk::FromOffsetSize(69, 0)));
}
TEST_F(SlicedAllocationFinderTest, FindForOffset) {
std::vector<FreeChunks> free_chunks_per_slice_time = {
{
{5, 7},
{10, 40},
{45, 49},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 52},
{60, 70},
},
{
{5, 7},
{10, 40},
{45, 55},
{60, 70},
},
};
std::vector<int64_t> sorted_slice_sizes = {3, 3, 3};
int64_t max_colocation_size = -1;
int64_t preferred_offset = -1;
int64_t alignment = 1;
Finder finder(
free_chunks_per_slice_time, sorted_slice_sizes, max_colocation_size,
preferred_offset, alignment,
NewPermutationIterator(sorted_slice_sizes.size()),
[](int64_t offset) { return offset != 45; });
EXPECT_THAT(finder.FindForOffset(10),
::testing::ElementsAre(
Chunk::FromOffsetSize(10, 3), Chunk::FromOffsetSize(13, 3),
Chunk::FromOffsetSize(16, 3), Chunk::FromOffsetSize(19, 0)));
EXPECT_THAT(finder.FindForOffset(20),
::testing::ElementsAre(
Chunk::FromOffsetSize(20, 3), Chunk::FromOffsetSize(23, 3),
Chunk::FromOffsetSize(26, 3), Chunk::FromOffsetSize(29, 0)));
EXPECT_THAT(finder.FindForOffset(45),
::testing::IsEmpty());
EXPECT_THAT(finder.FindForOffset(46),
::testing::ElementsAre(
Chunk::FromOffsetSize(46, 3), Chunk::FromOffsetSize(49, 3),
Chunk::FromOffsetSize(52, 3), Chunk::FromOffsetSize(55, 0)));
EXPECT_THAT(finder.FindForOffset(59),
::testing::IsEmpty());
EXPECT_THAT(finder.FindForOffset(61),
::testing::ElementsAre(
Chunk::FromOffsetSize(61, 3), Chunk::FromOffsetSize(64, 3),
Chunk::FromOffsetSize(67, 3), Chunk::FromOffsetSize(70, 0)));
}
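// Tests for SliceTimePermutationIterator. Each test case lists the expected
// permutations for either a new allocation (seeded with inclusive start
// times) or a repack (seeded with optional SlicedAllocationData), for both
// Ty::kAll and Ty::kPreferred.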
class SliceTimePermutationIteratorTest : public ::testing::Test {
protected:
struct NewAllocationTestCase {
void Test() const {
auto iterator = SliceTimePermutationIterator::CreateForNewAllocation(
ty, inclusive_start_times);
for (int i = 0; i < 5; ++i) {
VLOG(2) << "Test case try #" << i << ": NewAllocation, " << name;
EXPECT_THAT(GetPermutations(iterator.get()),
::testing::ElementsAreArray(expected_permutations))
<< "Failed NewAllocation, " << name;
}
}
std::string name;
SliceTimePermutationIterator::Ty ty;
std::vector<int64_t> inclusive_start_times;
std::vector<std::vector<int64_t>> expected_permutations;
};
struct RepackTestCase {
void Test() const {
auto iterator = SliceTimePermutationIterator::CreateForRepack(
ty, (original_slice_data.has_value() ? &(*original_slice_data)
: nullptr));
for (int i = 0; i < 5; ++i) {
VLOG(2) << "Test case try #" << i << ": Repack, " << name;
EXPECT_THAT(GetPermutations(iterator.get()),
::testing::ElementsAreArray(expected_permutations))
<< "Failed Repack, " << name;
}
}
std::string name;
SliceTimePermutationIterator::Ty ty;
std::optional<SlicedAllocationData> original_slice_data;
std::vector<std::vector<int64_t>> expected_permutations;
};
static std::vector<std::vector<int64_t>> GetPermutations(
SliceTimePermutationIterator* it) {
std::vector<std::vector<int64_t>> results;
for (it->Begin(); !it->Done(); it->Next()) {
absl::Span<const int64_t> permutation = it->Get();
results.push_back(
std::vector<int64_t>(permutation.begin(), permutation.end()));
}
return results;
}
};
TEST_F(SliceTimePermutationIteratorTest, NewAllocations) {
std::vector<NewAllocationTestCase> test_cases = {
{
"0 slices, all permutations",
SliceTimePermutationIterator::Ty::kAll,
{},
{},
},
{
"1 slice, all permutations",
SliceTimePermutationIterator::Ty::kAll,
{0},
{{0}},
},
{
"2 slices, all permutations",
SliceTimePermutationIterator::Ty::kAll,
{10, 20},
{{0, 1}, {1, 0}},
},
{
"many slices, all permutations, unique start times",
SliceTimePermutationIterator::Ty::kAll,
{40, 10, 450},
{{0, 1, 2}, {0, 2, 1}, {1, 0, 2}, {1, 2, 0}, {2, 0, 1}, {2, 1, 0}},
},
{
"many slices, all permutations, non-unique start times",
SliceTimePermutationIterator::Ty::kAll,
{40, 10, 450, 10},
{
{0, 1, 2, 3},
{0, 1, 3, 2},
{0, 2, 1, 3},
{0, 2, 3, 1},
{0, 3, 1, 2},
{0, 3, 2, 1},
{2, 0, 1, 3},
{2, 0, 3, 1},
{2, 3, 0, 1},
{3, 0, 1, 2},
{3, 0, 2, 1},
{3, 2, 0, 1},
},
},
{
"0 slices, preferred permutations",
SliceTimePermutationIterator::Ty::kPreferred,
{},
{},
},
{
"1 slice, preferred permutations",
SliceTimePermutationIterator::Ty::kPreferred,
{0},
{{0}},
},
{
"2 slices, preferred permutations",
SliceTimePermutationIterator::Ty::kPreferred,
{10, 20},
{{0, 1}, {1, 0}},
},
{
"many slices, preferred permutations, unique start times",
SliceTimePermutationIterator::Ty::kPreferred,
{40, 10, 450, 12, 14},
{{0, 1, 2, 3, 4}, {4, 3, 2, 1, 0}, {3, 1, 0, 2, 4}},
},
{
"many slices, preferred permutations, non-unique start times 1",
SliceTimePermutationIterator::Ty::kPreferred,
{40, 10, 450, 10},
{
{0, 1, 2, 3},
{3, 2, 1, 0},
{3, 1, 0, 2}},
},
{
"many slices, preferred permutations, non-unique start times 2",
SliceTimePermutationIterator::Ty::kPreferred,
{40, 40},
{
{0, 1},
},
},
};
for (const NewAllocationTestCase& test_case : test_cases) {
test_case.Test();
}
}
TEST_F(SliceTimePermutationIteratorTest, Repacks) {
std::vector<RepackTestCase> test_cases = {
{
"no slice data, all permutations",
SliceTimePermutationIterator::Ty::kAll,
std::nullopt,
{{0}},
},
{
"0 slices, all permutations",
SliceTimePermutationIterator::Ty::kAll,
SlicedAllocationData{},
{},
},
{
"1 slice, all permutations",
SliceTimePermutationIterator::Ty::kAll,
SlicedAllocationData{{
{1, 1, 1},
}},
{{0}},
},
{
"2 slices, uniform slice size, all permutations",
SliceTimePermutationIterator::Ty::kAll,
SlicedAllocationData{{
{1, 1, 1},
{1, 2, 2},
}},
{{0, 1}, {1, 0}},
},
{
"many slices, uniform slice size, unique start times, all "
"permutations",
SliceTimePermutationIterator::Ty::kAll,
SlicedAllocationData{{
{1, 1, 1},
{1, 2, 2},
{1, 3, 3},
}},
{{0, 1, 2}, {0, 2, 1}, {1, 0, 2}, {1, 2, 0}, {2, 0, 1}, {2, 1, 0}},
},
{
"many slices, non-uniform slice size, unique start times, all "
"permutations",
SliceTimePermutationIterator::Ty::kAll,
SlicedAllocationData{{
{1, 1, 1},
{2, 2, 3},
{1, 3, 2},
}},
{
{0, 2, 1},
{1, 2, 0},
},
},
{
"many slices, non-uniform slice size, non-unique start times, all "
"permutations",
SliceTimePermutationIterator::Ty::kAll,
SlicedAllocationData{{
{1, 1, 1},
{1, 2, 2},
{2, 3, 1},
{1, 5, 1},
{2, 6, 3},
{3, 8, 4},
}},
{
{0, 1, 2, 3, 4, 5},
{0, 1, 4, 3, 2, 5},
{0, 3, 1, 2, 4, 5},
{0, 3, 4, 1, 2, 5},
{3, 0, 1, 2, 4, 5},
{3, 0, 4, 1, 2, 5},
},
},
{
"no slice data, preferred permutations",
SliceTimePermutationIterator::Ty::kPreferred,
std::nullopt,
{{0}},
},
{
"0 slices, preferred permutations",
SliceTimePermutationIterator::Ty::kPreferred,
SlicedAllocationData{},
{},
},
{
"1 slice, preferred permutations",
SliceTimePermutationIterator::Ty::kPreferred,
SlicedAllocationData{{
{1, 1, 1},
}},
{{0}},
},
{
"2 slices, uniform slice size, preferred permutations",
SliceTimePermutationIterator::Ty::kPreferred,
SlicedAllocationData{{
{1, 1, 1},
{1, 2, 2},
}},
{{0, 1}, {1, 0}},
},
{
"many slices, uniform slice size, unique start times, preferred "
"permutations",
SliceTimePermutationIterator::Ty::kPreferred,
SlicedAllocationData{{
{1, 1, 1},
{1, 2, 2},
{1, 3, 3},
}},
{{0, 1, 2}, {2, 1, 0}, {1, 0, 2}},
},
{
"many slices, non-uniform slice size, unique start times, preferred "
"permutations",
SliceTimePermutationIterator::Ty::kPreferred,
SlicedAllocationData{{
{1, 1, 1},
{2, 2, 3},
{1, 3, 2},
}},
{
{0, 2, 1},
{1, 2, 0},
},
},
{
"many slices, non-uniform slice size, non-unique start times, "
"preferred permutations",
SliceTimePermutationIterator::Ty::kPreferred,
SlicedAllocationData{{
{1, 1, 1},
{1, 2, 2},
{2, 3, 1},
{1, 5, 1},
{2, 6, 3},
{3, 8, 4},
}},
{
{0, 2, 1, 3, 4, 5},
{3, 2, 1, 0, 4, 5},
},
},
};
for (const RepackTestCase& test_case : test_cases) {
test_case.Test();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/heap_simulator/heap_simulator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/heap_simulator/heap_simulator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4158aec7-4771-4a69-b0de-b4fe7c30f13e | cpp | tensorflow/tensorflow | ctc_beam_search_decoder | tensorflow/lite/kernels/ctc/ctc_beam_search_decoder.cc | tensorflow/lite/kernels/ctc/ctc_beam_search_decoder_test.cc | #include <algorithm>
#include <vector>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/ctc/ctc_beam_search.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
namespace tflite {
namespace ops {
namespace custom {
namespace ctc_beam_search_decoder {
constexpr int kInputsTensor = 0;
constexpr int kSequenceLengthTensor = 1;
typedef struct {
int beam_width;
int top_paths;
bool merge_repeated;
} CTCBeamSearchDecoderParams;
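// Parses the custom-op options (beam_width, top_paths, merge_repeated) from
// the flexbuffer blob attached to the op.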
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
TFLITE_CHECK(buffer != nullptr);
const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap();
CTCBeamSearchDecoderParams* option = new CTCBeamSearchDecoderParams;
option->beam_width = m["beam_width"].AsInt32();
option->top_paths = m["top_paths"].AsInt32();
option->merge_repeated = m["merge_repeated"].AsBool();
return option;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<CTCBeamSearchDecoderParams*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const CTCBeamSearchDecoderParams* option =
reinterpret_cast<CTCBeamSearchDecoderParams*>(node->user_data);
const int top_paths = option->top_paths;
TF_LITE_ENSURE(context, option->beam_width >= top_paths);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 3 * top_paths + 1);
const TfLiteTensor* inputs;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputsTensor, &inputs));
TF_LITE_ENSURE_EQ(context, NumDimensions(inputs), 3);
TF_LITE_ENSURE_EQ(context, inputs->type, kTfLiteFloat32);
const int batch_size = SizeOfDimension(inputs, 1);
const TfLiteTensor* sequence_length;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSequenceLengthTensor,
&sequence_length));
TF_LITE_ENSURE_EQ(context, NumDimensions(sequence_length), 1);
TF_LITE_ENSURE_EQ(context, NumElements(sequence_length), batch_size);
TF_LITE_ENSURE_EQ(context, sequence_length->type, kTfLiteInt32);
for (int i = 0; i < top_paths; ++i) {
TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i, &indices));
SetTensorToDynamic(indices);
TfLiteTensor* values;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, i + top_paths, &values));
SetTensorToDynamic(values);
TfLiteTensor* output_shape;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i + 2 * top_paths,
&output_shape));
SetTensorToDynamic(output_shape);
}
TfLiteTensor* log_probability_output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, top_paths * 3,
&log_probability_output));
TfLiteIntArray* log_probability_output_shape_array = TfLiteIntArrayCreate(2);
log_probability_output_shape_array->data[0] = batch_size;
log_probability_output_shape_array->data[1] = top_paths;
return context->ResizeTensor(context, log_probability_output,
log_probability_output_shape_array);
}
TfLiteStatus Resize(TfLiteContext* context,
std::initializer_list<int32_t> output_shape,
TfLiteTensor* output) {
const int dimensions = output_shape.size();
TfLiteIntArray* output_shape_array = TfLiteIntArrayCreate(dimensions);
int i = 0;
for (const int v : output_shape) {
output_shape_array->data[i++] = v;
}
return context->ResizeTensor(context, output, output_shape_array);
}
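// Writes the decoded sequences to the op outputs in sparse-tensor form: for
// each of the top_paths paths, a [num_entries, 2] indices tensor, a flat
// values tensor, and a 2-element dense-shape tensor {batch_size, max_decoded}.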
TfLiteStatus StoreAllDecodedSequences(
TfLiteContext* context,
const std::vector<std::vector<std::vector<int>>>& sequences,
TfLiteNode* node, int top_paths) {
const int32_t batch_size = sequences.size();
std::vector<int32_t> num_entries(top_paths, 0);
for (const auto& batch_s : sequences) {
TF_LITE_ENSURE_EQ(context, batch_s.size(), top_paths);
for (int p = 0; p < top_paths; ++p) {
num_entries[p] += batch_s[p].size();
}
}
for (int p = 0; p < top_paths; ++p) {
const int32_t p_num = num_entries[p];
TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, p, &indices));
TF_LITE_ENSURE_OK(context, Resize(context, {p_num, 2}, indices));
TfLiteTensor* values;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, p + top_paths, &values));
TF_LITE_ENSURE_OK(context, Resize(context, {p_num}, values));
TfLiteTensor* decoded_shape;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, p + 2 * top_paths,
&decoded_shape));
TF_LITE_ENSURE_OK(context, Resize(context, {2}, decoded_shape));
int32_t max_decoded = 0;
int32_t offset = 0;
int32_t* indices_data = GetTensorData<int32_t>(indices);
int32_t* values_data = GetTensorData<int32_t>(values);
int32_t* decoded_shape_data = GetTensorData<int32_t>(decoded_shape);
for (int b = 0; b < batch_size; ++b) {
auto& p_batch = sequences[b][p];
int32_t num_decoded = p_batch.size();
max_decoded = std::max(max_decoded, num_decoded);
std::copy_n(p_batch.begin(), num_decoded, values_data + offset);
for (int32_t t = 0; t < num_decoded; ++t, ++offset) {
indices_data[offset * 2] = b;
indices_data[offset * 2 + 1] = t;
}
}
decoded_shape_data[0] = batch_size;
decoded_shape_data[1] = max_decoded;
}
return kTfLiteOk;
}
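// Runs CTC beam search per batch element: feeds each time step (up to the
// per-batch sequence length) into the decoder, extracts the top_paths best
// paths and their log probabilities, then stores the decoded sequences.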
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* inputs;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputsTensor, &inputs));
const TfLiteTensor* sequence_length;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSequenceLengthTensor,
&sequence_length));
const CTCBeamSearchDecoderParams* option =
reinterpret_cast<CTCBeamSearchDecoderParams*>(node->user_data);
const int max_time = SizeOfDimension(inputs, 0);
const int batch_size = SizeOfDimension(inputs, 1);
const int num_classes = SizeOfDimension(inputs, 2);
const int beam_width = option->beam_width;
const int top_paths = option->top_paths;
const bool merge_repeated = option->merge_repeated;
for (int i = 0; i < batch_size; ++i) {
TF_LITE_ENSURE(context,
max_time >= GetTensorData<int32_t>(sequence_length)[i]);
}
std::vector<optimized_ops::TTypes<float>::UnalignedConstMatrix> input_list_t;
input_list_t.reserve(max_time);
for (std::size_t t = 0; t < max_time; ++t) {
input_list_t.emplace_back(
GetTensorData<float>(inputs) + t * batch_size * num_classes, batch_size,
num_classes);
}
::tflite::custom::ctc::CTCBeamSearchDecoder<>::DefaultBeamScorer beam_scorer;
::tflite::custom::ctc::CTCBeamSearchDecoder<> beam_search(
      num_classes, beam_width, &beam_scorer, 1,
merge_repeated);
float* input_chip_t_data =
static_cast<float*>(malloc(num_classes * sizeof(float)));
Eigen::array<Eigen::DenseIndex, 1> dims;
dims[0] = num_classes;
optimized_ops::TTypes<float>::Flat input_chip_t(input_chip_t_data, dims);
std::vector<std::vector<std::vector<int>>> best_paths(batch_size);
std::vector<float> log_probs;
TfLiteTensor* log_probabilities;
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, 3 * top_paths, &log_probabilities));
float* log_probabilities_output = GetTensorData<float>(log_probabilities);
for (int b = 0; b < batch_size; ++b) {
auto& best_paths_b = best_paths[b];
best_paths_b.resize(top_paths);
for (int t = 0; t < GetTensorData<int32_t>(sequence_length)[b]; ++t) {
input_chip_t = input_list_t[t].chip(b, 0);
auto input_bi =
Eigen::Map<const Eigen::ArrayXf>(input_chip_t.data(), num_classes);
beam_search.Step(input_bi);
}
TF_LITE_ENSURE(context, beam_search.TopPaths(top_paths, &best_paths_b,
&log_probs, merge_repeated));
beam_search.Reset();
for (int bp = 0; bp < top_paths; ++bp) {
log_probabilities_output[b * top_paths + bp] = log_probs[bp];
}
}
free(input_chip_t_data);
return StoreAllDecodedSequences(context, best_paths, node, top_paths);
}
}
TfLiteRegistration* Register_CTC_BEAM_SEARCH_DECODER() {
static TfLiteRegistration r = {
ctc_beam_search_decoder::Init, ctc_beam_search_decoder::Free,
ctc_beam_search_decoder::Prepare, ctc_beam_search_decoder::Eval};
return &r;
}
}
}
} | #include <functional>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/kernels/test_util.h"
namespace tflite {
namespace ops {
namespace custom {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
TfLiteRegistration* Register_CTC_BEAM_SEARCH_DECODER();
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
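// Test harness that wires up the CTCBeamSearchDecoder custom op with
// flexbuffer options; the outputs are 3 * top_paths sparse-tensor components
// followed by a log-probabilities tensor.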
class CTCBeamSearchDecoderOpModel : public SingleOpModel {
public:
CTCBeamSearchDecoderOpModel(std::initializer_list<int> input_shape,
std::initializer_list<int> sequence_length_shape,
int beam_width, int top_paths,
bool merge_repeated) {
inputs_ = AddInput(TensorType_FLOAT32);
sequence_length_ = AddInput(TensorType_INT32);
for (int i = 0; i < top_paths * 3; ++i) {
outputs_.push_back(AddOutput(TensorType_INT32));
}
outputs_.push_back(AddOutput(TensorType_FLOAT32));
flexbuffers::Builder fbb;
fbb.Map([&]() {
fbb.Int("beam_width", beam_width);
fbb.Int("top_paths", top_paths);
fbb.Bool("merge_repeated", merge_repeated);
});
fbb.Finish();
SetCustomOp("CTCBeamSearchDecoder", fbb.GetBuffer(),
Register_CTC_BEAM_SEARCH_DECODER);
BuildInterpreter({input_shape, sequence_length_shape});
}
int inputs() { return inputs_; }
int sequence_length() { return sequence_length_; }
std::vector<std::vector<int>> GetDecodedOutpus() {
std::vector<std::vector<int>> outputs;
for (int i = 0; i < outputs_.size() - 1; ++i) {
outputs.push_back(ExtractVector<int>(outputs_[i]));
}
return outputs;
}
std::vector<float> GetLogProbabilitiesOutput() {
return ExtractVector<float>(outputs_[outputs_.size() - 1]);
}
std::vector<std::vector<int>> GetOutputShapes() {
std::vector<std::vector<int>> output_shapes;
for (const int output : outputs_) {
output_shapes.push_back(GetTensorShape(output));
}
return output_shapes;
}
private:
int inputs_;
int sequence_length_;
std::vector<int> outputs_;
};
TEST(CTCBeamSearchTest, SimpleTest) {
CTCBeamSearchDecoderOpModel m({2, 1, 2}, {1}, 1, 1, true);
m.PopulateTensor<float>(m.inputs(),
{-0.50922557, -1.35512652, -2.55445064, -1.58419356});
m.PopulateTensor<int>(m.sequence_length(), {2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const std::vector<std::vector<int>>& output_shapes = m.GetOutputShapes();
EXPECT_EQ(output_shapes.size(), 4);
EXPECT_THAT(output_shapes[0], ElementsAre(1, 2));
EXPECT_THAT(output_shapes[1], ElementsAre(1));
EXPECT_THAT(output_shapes[2], ElementsAre(2));
EXPECT_THAT(output_shapes[3], ElementsAre(1, 1));
const std::vector<std::vector<int>>& decoded_outputs = m.GetDecodedOutpus();
EXPECT_EQ(decoded_outputs.size(), 3);
EXPECT_THAT(decoded_outputs[0], ElementsAre(0, 0));
EXPECT_THAT(decoded_outputs[1], ElementsAre(0));
EXPECT_THAT(decoded_outputs[2], ElementsAre(1, 1));
EXPECT_THAT(m.GetLogProbabilitiesOutput(),
ElementsAreArray(ArrayFloatNear({-0.357094})));
}
TEST(CTCBeamSearchTest, MultiBatchTest) {
CTCBeamSearchDecoderOpModel m({3, 3, 3}, {3}, 1, 1, true);
m.PopulateTensor<float>(
m.inputs(),
{-0.63649208, -0.00487571, -0.04249819, -0.67754697, -1.0341399,
-2.14717721, -0.77686821, -3.41973774, -0.05151402, -0.21482619,
-0.57411168, -1.45039917, -0.73769373, -2.10941739, -0.44818325,
-0.25287673, -2.80057302, -0.54748312, -0.73334867, -0.86537719,
-0.2065197, -0.18725838, -1.42770405, -0.86051965, -1.61642301,
-2.07275114, -0.9201845});
m.PopulateTensor<int>(m.sequence_length(), {3, 3, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const std::vector<std::vector<int>>& output_shapes = m.GetOutputShapes();
EXPECT_EQ(output_shapes.size(), 4);
EXPECT_THAT(output_shapes[0], ElementsAre(4, 2));
EXPECT_THAT(output_shapes[1], ElementsAre(4));
EXPECT_THAT(output_shapes[2], ElementsAre(2));
EXPECT_THAT(output_shapes[3], ElementsAre(3, 1));
const std::vector<std::vector<int>>& decoded_outputs = m.GetDecodedOutpus();
EXPECT_EQ(decoded_outputs.size(), 3);
EXPECT_THAT(decoded_outputs[0], ElementsAre(0, 0, 0, 1, 1, 0, 2, 0));
EXPECT_THAT(decoded_outputs[1], ElementsAre(1, 0, 0, 0));
EXPECT_THAT(decoded_outputs[2], ElementsAre(3, 2));
EXPECT_THAT(m.GetLogProbabilitiesOutput(),
ElementsAreArray(ArrayFloatNear({-1.88343, -1.41188, -1.20958})));
}
TEST(CTCBeamSearchTest, MultiPathsTest) {
CTCBeamSearchDecoderOpModel m({3, 2, 5}, {2}, 3, 2, true);
m.PopulateTensor<float>(
m.inputs(),
{-2.206851, -0.09542714, -0.2393415, -3.81866197, -0.27241158,
-0.20371124, -0.68236623, -1.1397166, -0.17422639, -1.85224048,
-0.9406037, -0.32544678, -0.21846784, -0.38377237, -0.33498676,
-0.10139782, -0.51886883, -0.21678554, -0.15267063, -1.91164412,
-0.31328673, -0.27462716, -0.65975336, -1.53671973, -2.76554225,
-0.23920634, -1.2370502, -4.98751576, -3.12995717, -0.43129368});
m.PopulateTensor<int>(m.sequence_length(), {3, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const std::vector<std::vector<int>>& output_shapes = m.GetOutputShapes();
EXPECT_EQ(output_shapes.size(), 7);
EXPECT_THAT(output_shapes[0], ElementsAre(4, 2));
EXPECT_THAT(output_shapes[1], ElementsAre(3, 2));
EXPECT_THAT(output_shapes[2], ElementsAre(4));
EXPECT_THAT(output_shapes[3], ElementsAre(3));
EXPECT_THAT(output_shapes[4], ElementsAre(2));
EXPECT_THAT(output_shapes[5], ElementsAre(2));
EXPECT_THAT(output_shapes[6], ElementsAre(2, 2));
const std::vector<std::vector<int>>& decoded_outputs = m.GetDecodedOutpus();
EXPECT_EQ(decoded_outputs.size(), 6);
EXPECT_THAT(decoded_outputs[0], ElementsAre(0, 0, 0, 1, 1, 0, 1, 1));
EXPECT_THAT(decoded_outputs[1], ElementsAre(0, 0, 0, 1, 1, 0));
EXPECT_THAT(decoded_outputs[2], ElementsAre(1, 2, 3, 0));
EXPECT_THAT(decoded_outputs[3], ElementsAre(2, 1, 0));
EXPECT_THAT(decoded_outputs[4], ElementsAre(2, 2));
EXPECT_THAT(decoded_outputs[5], ElementsAre(2, 2));
EXPECT_THAT(m.GetLogProbabilitiesOutput(),
ElementsAreArray(
ArrayFloatNear({-2.65148, -2.65864, -2.17914, -2.61357})));
}
TEST(CTCBeamSearchTest, NonEqualSequencesTest) {
CTCBeamSearchDecoderOpModel m({3, 3, 4}, {3}, 3, 1, true);
m.PopulateTensor<float>(
m.inputs(),
{-1.26658163, -0.25760023, -0.03917975, -0.63772235, -0.03794756,
-0.45063099, -0.27706473, -0.01569179, -0.59940385, -0.35700127,
-0.48920721, -1.42635476, -1.3462478, -0.02565498, -0.30179568,
-0.6491698, -0.55017719, -2.92291466, -0.92522973, -0.47592022,
-0.07099135, -0.31575624, -0.86345281, -0.36017021, -0.79208612,
-1.75306124, -0.65089224, -0.00912786, -0.42915003, -1.72606203,
-1.66337589, -0.70800793, -2.52272352, -0.67329562, -2.49145522,
-0.49786342});
m.PopulateTensor<int>(m.sequence_length(), {1, 2, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const std::vector<std::vector<int>>& output_shapes = m.GetOutputShapes();
EXPECT_EQ(output_shapes.size(), 4);
EXPECT_THAT(output_shapes[0], ElementsAre(3, 2));
EXPECT_THAT(output_shapes[1], ElementsAre(3));
EXPECT_THAT(output_shapes[2], ElementsAre(2));
EXPECT_THAT(output_shapes[3], ElementsAre(3, 1));
const std::vector<std::vector<int>>& decoded_outputs = m.GetDecodedOutpus();
EXPECT_EQ(decoded_outputs.size(), 3);
EXPECT_THAT(decoded_outputs[0], ElementsAre(0, 0, 1, 0, 2, 0));
EXPECT_THAT(decoded_outputs[1], ElementsAre(2, 0, 1));
EXPECT_THAT(decoded_outputs[2], ElementsAre(3, 1));
EXPECT_THAT(m.GetLogProbabilitiesOutput(),
ElementsAreArray(ArrayFloatNear({-0.97322, -1.16334, -2.15553})));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/ctc/ctc_beam_search_decoder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/ctc/ctc_beam_search_decoder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d75b907d-df98-4296-ba6f-3db885a2d24f | cpp | tensorflow/tensorflow | mlir_pass_instrumentation | tensorflow/compiler/mlir/tf2xla/internal/mlir_pass_instrumentation.cc | tensorflow/compiler/mlir/tf2xla/internal/mlir_pass_instrumentation_test.cc | #include "tensorflow/compiler/mlir/tf2xla/internal/mlir_pass_instrumentation.h"
#include <algorithm>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/platform/logging.h"
namespace mlir {
class MlirPassInstrumentationRegistry {
public:
static MlirPassInstrumentationRegistry& Instance() {
static MlirPassInstrumentationRegistry* r =
new MlirPassInstrumentationRegistry;
return *r;
}
std::unordered_map<std::string,
std::function<std::unique_ptr<PassInstrumentation>()>>
instrumentors_;
};
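// Registers a pass-instrumentation factory under `name`; duplicate
// registrations for the same name are ignored (with a VLOG message).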
void RegisterPassInstrumentor(
const std::string& name,
std::function<std::unique_ptr<PassInstrumentation>()> creator) {
MlirPassInstrumentationRegistry& r =
MlirPassInstrumentationRegistry::Instance();
auto result = r.instrumentors_.emplace(name, creator);
if (!result.second) {
VLOG(1) << "Duplicate MLIR pass instrumentor registration";
}
}
std::vector<std::function<std::unique_ptr<PassInstrumentation>()>>
GetPassInstrumentors() {
MlirPassInstrumentationRegistry& r =
MlirPassInstrumentationRegistry::Instance();
std::vector<std::function<std::unique_ptr<PassInstrumentation>()>> result;
result.reserve(r.instrumentors_.size());
std::transform(r.instrumentors_.begin(), r.instrumentors_.end(),
std::back_inserter(result), [](auto v) { return v.second; });
return result;
}
} | #include "tensorflow/compiler/mlir/tf2xla/internal/mlir_pass_instrumentation.h"
#include <cstddef>
#include <memory>
#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace mlir {
namespace {
static const char* kTestInstrumentationName = "test-intrumentatron";
static const char* kTestInstrumentationSearch = "tf.Identity";
struct StringStream : public llvm::raw_ostream {
StringStream() { SetUnbuffered(); }
~StringStream() override = default;
uint64_t current_pos() const override { return 0; }
void write_impl(const char* ptr, size_t size) override {
ss.write(ptr, size);
}
std::stringstream ss;
};
class TestPassInstrumentation : public ::testing::Test {
public:
void SetPassThatChangedIdentity(absl::string_view pass_name) {
pass_that_changed_identity_ = pass_name;
}
absl::string_view GetPassThatChangedIdentity() {
return pass_that_changed_identity_;
}
private:
std::string pass_that_changed_identity_;
friend class TestInstrumentor;
};
class TestInstrumentor : public PassInstrumentation {
public:
explicit TestInstrumentor(TestPassInstrumentation* test) : test_(test) {}
private:
void runBeforePass(Pass* pass, Operation* op) override {
StringStream stream;
op->print(stream, mlir::OpPrintingFlags().useLocalScope());
ops_seen_by_pass_[pass] = stream.ss.str();
}
void runAfterPass(Pass* pass, Operation* op) override {
StringStream stream;
op->print(stream, mlir::OpPrintingFlags().useLocalScope());
if (!absl::StrContains(stream.ss.str(), kTestInstrumentationSearch) &&
absl::StrContains(ops_seen_by_pass_[pass],
kTestInstrumentationSearch)) {
test_->SetPassThatChangedIdentity(pass->getName().str());
}
}
private:
TestPassInstrumentation* test_;
std::unordered_map<mlir::Pass*, std::string> ops_seen_by_pass_;
};
TEST_F(TestPassInstrumentation, CreatedCalledAndSetsPassName) {
RegisterPassInstrumentor(kTestInstrumentationName, [&]() {
return std::make_unique<TestInstrumentor>(this);
});
constexpr char legalization[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main(%arg0: tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>) -> tensor<?xi32, #mhlo.type_extensions<bounds = [1]>> {
%0 = "tf.Identity"(%arg0) : (tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>) -> tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>
func.return %0 : tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>
}
})";
SetPassThatChangedIdentity("");
std::vector<::tensorflow::TensorShape> arg_shapes = {{1}};
auto compilation_result = tensorflow::XlaCompilationResult();
TF_EXPECT_OK(tensorflow::CompileSerializedMlirToXlaHlo(
legalization, arg_shapes, "XLA_TPU_JIT",
true, false,
{}, &compilation_result)
.status());
EXPECT_FALSE(GetPassThatChangedIdentity().empty());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/mlir_pass_instrumentation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/mlir_pass_instrumentation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d899afcc-2a19-49aa-8032-5f951e1034ad | cpp | tensorflow/tensorflow | reduction_dimension_grouper | third_party/xla/xla/service/gpu/transforms/reduction_dimension_grouper.cc | third_party/xla/xla/service/gpu/transforms/reduction_dimension_grouper_test.cc | #include "xla/service/gpu/transforms/reduction_dimension_grouper.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/layout_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
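// Rewrites reduce ops so that runs of consecutive reduced dimensions are
// merged into a single dimension, bitcasting each operand to the grouped
// shape before the reduction.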
class ReduceDimensionGroupVisitor : public DfsHloRewriteVisitor {
public:
absl::Status HandleReduce(HloInstruction *hlo) override {
auto reduce = Cast<HloReduceInstruction>(hlo);
VLOG(4) << "Input: " << reduce->ToString();
absl::InlinedVector<HloInstruction *, 2> reduce_inputs_grouped;
std::vector<int64_t> reduced_dims_grouped;
int idx = -1;
for (HloInstruction *operand : reduce->inputs()) {
idx++;
std::vector<int64_t> new_grouped_dims;
const Shape &shape = operand->shape();
CHECK(shape == LayoutUtil::GetWithDefaultLayout(shape))
<< "Default layout should be enforced on reduction operand";
auto is_reduced = [&](int dim) {
return absl::c_linear_search(reduce->dimensions(), dim);
};
bool changed = false;
int64_t next_dim_size = 1;
for (int logical_dim = 0; logical_dim < shape.rank(); logical_dim++) {
VLOG(5) << "Processing dimension " << logical_dim << " of size "
<< shape.dimensions(logical_dim);
if (is_reduced(logical_dim) && logical_dim < shape.rank() - 1 &&
is_reduced(logical_dim + 1)) {
VLOG(5) << "This and consecutive dimension are reduced, merging";
changed = true;
next_dim_size *= shape.dimensions(logical_dim);
continue;
}
if (is_reduced(logical_dim)) {
new_grouped_dims.push_back(next_dim_size *
shape.dimensions(logical_dim));
if (idx == 0) {
reduced_dims_grouped.push_back(new_grouped_dims.size() - 1);
}
next_dim_size = 1;
} else {
new_grouped_dims.push_back(shape.dimensions(logical_dim));
}
}
if (!changed) {
return absl::OkStatus();
}
Shape grouped_shape =
ShapeUtil::MakeShape(shape.element_type(), new_grouped_dims);
reduce_inputs_grouped.push_back(reduce->parent()->AddInstruction(
HloInstruction::CreateBitcast(grouped_shape, operand),
&operand->metadata()));
VLOG(5) << "Adding bitcast: " << reduce_inputs_grouped.back()->ToString();
}
std::unique_ptr<HloInstruction> new_reduce = HloInstruction::CreateReduce(
reduce->shape(), reduce_inputs_grouped, reduce->init_values(),
reduced_dims_grouped, reduce->to_apply());
VLOG(5) << "Generated new reduction: " << new_reduce->ToString();
return ReplaceWithNewInstruction(reduce, std::move(new_reduce));
}
};
absl::StatusOr<bool> ReductionDimensionGrouper::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
TF_ASSIGN_OR_RETURN(bool changed, ReduceDimensionGroupVisitor().RunOnModule(
module, execution_threads));
return changed;
}
}
} | #include "xla/service/gpu/transforms/reduction_dimension_grouper.h"
#include <optional>
#include "absl/strings/string_view.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class ReductionDimensionGrouperTest : public HloTestBase {
public:
void CheckDimensionGrouper(absl::string_view hlo,
std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(hlo, gpu::ReductionDimensionGrouper{}, expected);
}
};
TEST_F(ReductionDimensionGrouperTest, ReductionWithGrouping) {
const char* hlo = R"(
HloModule ReductionWithGrouping
add {
accum = f32[] parameter(0)
op = f32[] parameter(1)
ROOT out = f32[] add(accum, op)
}
ENTRY main {
input = f32[100,10,32,3]{3,2,1,0} parameter(0)
zero = f32[] constant(0)
ROOT out = f32[100,10]{0,1} reduce(input, zero), dimensions={2,3}, to_apply=add
}
)";
CheckDimensionGrouper(hlo,
R"(
)");
}
TEST_F(ReductionDimensionGrouperTest, ReductionWithGroupingVariadic) {
const char* hlo = R"(
HloModule ReductionWithGrouping
argmax {
running_max = f32[] parameter(0)
running_max_idx = u32[] parameter(1)
current_value = f32[] parameter(2)
current_value_idx = u32[] parameter(3)
current = (f32[], u32[]) tuple(running_max, running_max_idx)
potential = (f32[], u32[]) tuple(current_value, current_value_idx)
cmp_code = pred[] compare(current_value, running_max), direction=GT
new_max = f32[] select(cmp_code, current_value, running_max)
new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
input = f32[100,10,32,3]{3,2,1,0} parameter(0)
idxs = u32[100,10,32,3]{3,2,1,0} parameter(1)
zero = f32[] constant(0)
zero_idx = u32[] constant(0)
ROOT out = (f32[100,10]{1,0}, u32[100,10]{1,0}) reduce(input, idxs, zero, zero_idx), dimensions={2,3}, to_apply=argmax
}
)";
CheckDimensionGrouper(hlo, R"(
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/reduction_dimension_grouper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/reduction_dimension_grouper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
885c633f-e336-4b1d-baa1-74c080d9a5ac | cpp | tensorflow/tensorflow | dense_image_warp | tensorflow/lite/kernels/perception/dense_image_warp.cc | tensorflow/lite/kernels/perception/dense_image_warp_test.cc | #include <algorithm>
#include <cmath>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace dense_image_warp {
constexpr int kInputTensor = 0;
constexpr int kFlowTensor = 1;
constexpr int kOutputTensor = 0;
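// Bilinearly warps `input_data` by `flow_data`: for every output pixel the
// query point is (y, x) minus the flow vector; the four input pixels around
// the (clamped) query point are blended per channel.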
inline void DenseImageWarp(const RuntimeShape& input_shape,
const float* input_data,
const RuntimeShape& flow_shape,
const float* flow_data, float* output_data) {
const int batches = MatchingDim(input_shape, 0, flow_shape, 0);
const int height = MatchingDim(input_shape, 1, flow_shape, 1);
const int width = MatchingDim(input_shape, 2, flow_shape, 2);
const int channels = input_shape.Dims(3);
TFLITE_CHECK_EQ(flow_shape.Dims(3), 2);
const int max_floor_y = height - 2;
const int max_floor_x = width - 2;
for (int batch = 0; batch < batches; ++batch) {
for (int in_y = 0; in_y < height; ++in_y) {
for (int in_x = 0; in_x < width; ++in_x) {
float querry_point_y =
in_y - flow_data[Offset(flow_shape, batch, in_y, in_x, 0)];
float querry_point_x =
in_x - flow_data[Offset(flow_shape, batch, in_y, in_x, 1)];
int floor_y =
std::min(std::max(0, static_cast<int>(std::floor(querry_point_y))),
max_floor_y);
int floor_x =
std::min(std::max(0, static_cast<int>(std::floor(querry_point_x))),
max_floor_x);
float alpha_y =
std::min(std::max(0.0f, querry_point_y - floor_y), 1.0f);
float alpha_x =
std::min(std::max(0.0f, querry_point_x - floor_x), 1.0f);
for (int c = 0; c < channels; ++c) {
float top_left =
input_data[Offset(input_shape, batch, floor_y, floor_x, c)];
float top_right =
input_data[Offset(input_shape, batch, floor_y, floor_x + 1, c)];
float bottom_left =
input_data[Offset(input_shape, batch, floor_y + 1, floor_x, c)];
float bottom_right = input_data[Offset(input_shape, batch,
floor_y + 1, floor_x + 1, c)];
float interp_top = alpha_x * (top_right - top_left) + top_left;
float interp_bottom =
alpha_x * (bottom_right - bottom_left) + bottom_left;
float interp = alpha_y * (interp_bottom - interp_top) + interp_top;
output_data[Offset(input_shape, batch, in_y, in_x, c)] = interp;
}
}
}
}
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TF_LITE_ENSURE(context, input != nullptr);
const TfLiteTensor* flow = GetInput(context, node, kFlowTensor);
TF_LITE_ENSURE(context, flow != nullptr);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, flow->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(flow), 4);
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
const RuntimeShape input_shape = GetTensorShape(input);
const RuntimeShape flow_shape = GetTensorShape(flow);
TF_LITE_ENSURE_EQ(context, input_shape.Dims(0), flow_shape.Dims(0));
TF_LITE_ENSURE_EQ(context, input_shape.Dims(1), flow_shape.Dims(1));
TF_LITE_ENSURE_EQ(context, input_shape.Dims(2), flow_shape.Dims(2));
TF_LITE_ENSURE_MSG(context, input_shape.Dims(1) >= 2,
"Image height must be at least 2.");
TF_LITE_ENSURE_MSG(context, input_shape.Dims(2) >= 2,
"Image width must be at least 2.");
TF_LITE_ENSURE_MSG(context, flow_shape.Dims(3) == 2,
"The last dimension of flow tensor must be 2.");
TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims);
return context->ResizeTensor(context, output, output_size);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TF_LITE_ENSURE(context, input != nullptr);
const TfLiteTensor* flow = GetInput(context, node, kFlowTensor);
TF_LITE_ENSURE(context, flow != nullptr);
DenseImageWarp(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(flow), GetTensorData<float>(flow),
GetTensorData<float>(output));
return kTfLiteOk;
}
}
TfLiteRegistration* RegisterDenseImageWarp() {
static TfLiteRegistration reg = {nullptr,
nullptr, dense_image_warp::Prepare,
dense_image_warp::Eval};
  return &reg;
}
TfLiteRegistration* Register_DENSE_IMAGE_WARP() {
return RegisterDenseImageWarp();
}
}
}
} | #include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/perception/perception_ops.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
using testing::ElementsAreArray;
class DenseImageWarpOpModel : public SingleOpModel {
public:
DenseImageWarpOpModel(const TensorData& input, const TensorData& flow,
const TensorData& output) {
input_ = AddInput(input);
flow_ = AddInput(flow);
output_ = AddOutput(output);
std::vector<uint8_t> custom_option;
SetCustomOp("DenseImageWarp", custom_option, RegisterDenseImageWarp);
BuildInterpreter({GetShape(input_), GetShape(flow_)});
}
void SetInput(const std::vector<float>& data) {
PopulateTensor(input_, data);
}
void SetFlow(const std::vector<float>& data) { PopulateTensor(flow_, data); }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input_;
int flow_;
int output_;
};
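// The death tests below exercise the shape checks in Prepare: input and flow
// must agree on batch/height/width, and the last flow dimension must be 2.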
TEST(DenseImageWarpOpTest, MismatchedSizeTest) {
EXPECT_DEATH_IF_SUPPORTED(
DenseImageWarpOpModel model(
{TensorType_FLOAT32, {1, 4, 4, 1}},
{TensorType_FLOAT32, {1, 4, 2, 2}},
{TensorType_FLOAT32, {}});
, "input_shape.Dims.2. != flow_shape.Dims.2. .4 != 2.");
}
TEST(DenseImageWarpOpTest, WrongFlowSizeTest) {
EXPECT_DEATH_IF_SUPPORTED(DenseImageWarpOpModel model(
{TensorType_FLOAT32, {1, 4, 4, 1}},
{TensorType_FLOAT32, {1, 4, 4, 1}},
{TensorType_FLOAT32, {}});
, "The last dimension of flow tensor must be 2.");
}
TEST(DenseImageWarpOpTest, SimpleTest) {
DenseImageWarpOpModel model(
{TensorType_FLOAT32, {1, 4, 4, 1}},
{TensorType_FLOAT32, {1, 4, 4, 2}},
{TensorType_FLOAT32, {}});
model.SetInput({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15});
model.SetFlow({4, 10, 6, 10, 4, 2, 6, 6, 10, -4, 2, -2, 6, 8, 6, 0,
2, -2, 10, 6, 4, 4, 2, -4, -4, 10, -4, -4, -2, 6, 4, 6});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({0, 0, 0, 0, 3, 3, 0, 3, 2, 0,
0, 3, 12, 15, 12, 0}));
}
TEST(DenseImageWarpOpTest, RoundTest) {
DenseImageWarpOpModel model(
{TensorType_FLOAT32, {1, 4, 4, 1}},
{TensorType_FLOAT32, {1, 4, 4, 2}},
{TensorType_FLOAT32, {}});
model.SetInput({0.2, 1.5, 2.4, 3.5, 4.6, 5.1, 6.3, 7.2, 8.5, 9.6, 10.9, 11.6,
12.8, 13.2, 14.4, 15.5});
model.SetFlow({4, 10, 6, 10, 4, 2, 6, 6, 10, -4, 2, -2, 6, 8, 6, 0,
2, -2, 10, 6, 4, 4, 2, -4, -4, 10, -4, -4, -2, 6, 4, 6});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({0.2, 0.2, 0.2, 0.2, 3.5, 3.5, 0.2, 3.5, 2.4,
0.2, 0.2, 3.5, 12.8, 15.5, 12.8, 0.2}));
}
TEST(DenseImageWarpOpTest, WithBatchandChannelTest) {
DenseImageWarpOpModel model(
{TensorType_FLOAT32, {2, 4, 4, 3}},
{TensorType_FLOAT32, {2, 4, 4, 2}},
{TensorType_FLOAT32, {}});
std::vector<float> input_data;
for (int i = 0; i < 96; ++i) input_data.push_back(i);
model.SetInput(input_data);
model.SetFlow({2, -2, 10, 6, 4, 4, 2, -4, -4, 10, -4, -4, -2, 6, 4, 6,
4, 10, 6, 10, 4, 2, 6, 6, 10, -4, 2, -2, 6, 8, 6, 0,
2, -2, 10, 6, 4, 4, 2, -4, -4, 10, -4, -4, -2, 6, 4, 6,
4, 10, 6, 10, 4, 2, 6, 6, 10, -4, 2, -2, 6, 8, 6, 0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 4, 4, 3}));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({6, 7, 8, 0, 1, 2, 0, 1, 2, 9, 10, 11, 36, 37,
38, 45, 46, 47, 36, 37, 38, 0, 1, 2, 0, 1, 2, 0,
1, 2, 0, 1, 2, 0, 1, 2, 9, 10, 11, 21, 22, 23,
0, 1, 2, 9, 10, 11, 54, 55, 56, 48, 49, 50, 48, 49,
50, 57, 58, 59, 84, 85, 86, 93, 94, 95, 84, 85, 86, 48,
49, 50, 48, 49, 50, 48, 49, 50, 48, 49, 50, 48, 49, 50,
57, 58, 59, 69, 70, 71, 48, 49, 50, 57, 58, 59}));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/perception/dense_image_warp.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/perception/dense_image_warp_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1d8752e4-7dbe-4dac-8754-ecd8df904db3 | cpp | abseil/abseil-cpp | inlined_vector | absl/container/internal/inlined_vector.h | absl/container/inlined_vector_test.cc | #ifndef ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_H_
#define ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_H_
#include <algorithm>
#include <cstddef>
#include <cstring>
#include <iterator>
#include <limits>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/identity.h"
#include "absl/base/macros.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
#include "absl/types/span.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace inlined_vector_internal {
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
#endif
template <typename A>
using AllocatorTraits = std::allocator_traits<A>;
template <typename A>
using ValueType = typename AllocatorTraits<A>::value_type;
template <typename A>
using SizeType = typename AllocatorTraits<A>::size_type;
template <typename A>
using Pointer = typename AllocatorTraits<A>::pointer;
template <typename A>
using ConstPointer = typename AllocatorTraits<A>::const_pointer;
template <typename A>
using DifferenceType = typename AllocatorTraits<A>::difference_type;
template <typename A>
using Reference = ValueType<A>&;
template <typename A>
using ConstReference = const ValueType<A>&;
template <typename A>
using Iterator = Pointer<A>;
template <typename A>
using ConstIterator = ConstPointer<A>;
template <typename A>
using ReverseIterator = typename std::reverse_iterator<Iterator<A>>;
template <typename A>
using ConstReverseIterator = typename std::reverse_iterator<ConstIterator<A>>;
template <typename A>
using MoveIterator = typename std::move_iterator<Iterator<A>>;
template <typename Iterator>
using IsAtLeastForwardIterator = std::is_convertible<
typename std::iterator_traits<Iterator>::iterator_category,
std::forward_iterator_tag>;
template <typename A>
using IsMoveAssignOk = std::is_move_assignable<ValueType<A>>;
template <typename A>
using IsSwapOk = absl::type_traits_internal::IsSwappable<ValueType<A>>;
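// DestroyAdapter: destroys a range of elements through the allocator. The
// specialization for trivially destructible value types is a no-op; the
// general version destroys elements in reverse order of construction.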
template <typename A, bool IsTriviallyDestructible =
absl::is_trivially_destructible<ValueType<A>>::value>
struct DestroyAdapter;
template <typename A>
struct DestroyAdapter<A, false> {
static void DestroyElements(A& allocator, Pointer<A> destroy_first,
SizeType<A> destroy_size) {
for (SizeType<A> i = destroy_size; i != 0;) {
--i;
AllocatorTraits<A>::destroy(allocator, destroy_first + i);
}
}
};
template <typename A>
struct DestroyAdapter<A, true> {
static void DestroyElements(A& allocator, Pointer<A> destroy_first,
SizeType<A> destroy_size) {
static_cast<void>(allocator);
static_cast<void>(destroy_first);
static_cast<void>(destroy_size);
}
};
template <typename A>
struct Allocation {
Pointer<A> data = nullptr;
SizeType<A> capacity = 0;
};
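// MallocAdapter: thin wrapper over allocator_traits allocate/deallocate that
// returns the allocation as a {data, capacity} pair.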
template <typename A,
bool IsOverAligned =
(alignof(ValueType<A>) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)>
struct MallocAdapter {
static Allocation<A> Allocate(A& allocator, SizeType<A> requested_capacity) {
return {AllocatorTraits<A>::allocate(allocator, requested_capacity),
requested_capacity};
}
static void Deallocate(A& allocator, Pointer<A> pointer,
SizeType<A> capacity) {
AllocatorTraits<A>::deallocate(allocator, pointer, capacity);
}
};
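// ConstructElements: constructs `construct_size` elements starting at
// `construct_first`, pulling values from a value adapter. If a constructor
// throws, the already-constructed prefix is destroyed before rethrowing.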
template <typename A, typename ValueAdapter>
void ConstructElements(absl::internal::type_identity_t<A>& allocator,
Pointer<A> construct_first, ValueAdapter& values,
SizeType<A> construct_size) {
for (SizeType<A> i = 0; i < construct_size; ++i) {
ABSL_INTERNAL_TRY { values.ConstructNext(allocator, construct_first + i); }
ABSL_INTERNAL_CATCH_ANY {
DestroyAdapter<A>::DestroyElements(allocator, construct_first, i);
ABSL_INTERNAL_RETHROW;
}
}
}
template <typename A, typename ValueAdapter>
void AssignElements(Pointer<A> assign_first, ValueAdapter& values,
SizeType<A> assign_size) {
for (SizeType<A> i = 0; i < assign_size; ++i) {
values.AssignNext(assign_first + i);
}
}
template <typename A>
struct StorageView {
Pointer<A> data;
SizeType<A> size;
SizeType<A> capacity;
};
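// Value adapters used by ConstructElements/AssignElements: each adapter
// yields the next value to construct or assign. IteratorValueAdapter walks an
// iterator, CopyValueAdapter repeats a single value, and DefaultValueAdapter
// value-initializes.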
template <typename A, typename Iterator>
class IteratorValueAdapter {
public:
explicit IteratorValueAdapter(const Iterator& it) : it_(it) {}
void ConstructNext(A& allocator, Pointer<A> construct_at) {
AllocatorTraits<A>::construct(allocator, construct_at, *it_);
++it_;
}
void AssignNext(Pointer<A> assign_at) {
*assign_at = *it_;
++it_;
}
private:
Iterator it_;
};
template <typename A>
class CopyValueAdapter {
public:
explicit CopyValueAdapter(ConstPointer<A> p) : ptr_(p) {}
void ConstructNext(A& allocator, Pointer<A> construct_at) {
AllocatorTraits<A>::construct(allocator, construct_at, *ptr_);
}
void AssignNext(Pointer<A> assign_at) { *assign_at = *ptr_; }
private:
ConstPointer<A> ptr_;
};
template <typename A>
class DefaultValueAdapter {
public:
explicit DefaultValueAdapter() {}
void ConstructNext(A& allocator, Pointer<A> construct_at) {
AllocatorTraits<A>::construct(allocator, construct_at);
}
void AssignNext(Pointer<A> assign_at) { *assign_at = ValueType<A>(); }
};
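// AllocationTransaction: RAII owner of a pending heap allocation. The buffer
// is deallocated in the destructor unless Release() is called to hand it off.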
template <typename A>
class AllocationTransaction {
public:
explicit AllocationTransaction(A& allocator)
: allocator_data_(allocator, nullptr), capacity_(0) {}
~AllocationTransaction() {
if (DidAllocate()) {
MallocAdapter<A>::Deallocate(GetAllocator(), GetData(), GetCapacity());
}
}
AllocationTransaction(const AllocationTransaction&) = delete;
void operator=(const AllocationTransaction&) = delete;
A& GetAllocator() { return allocator_data_.template get<0>(); }
Pointer<A>& GetData() { return allocator_data_.template get<1>(); }
SizeType<A>& GetCapacity() { return capacity_; }
bool DidAllocate() { return GetData() != nullptr; }
Pointer<A> Allocate(SizeType<A> requested_capacity) {
Allocation<A> result =
MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
GetData() = result.data;
GetCapacity() = result.capacity;
return result.data;
}
ABSL_MUST_USE_RESULT Allocation<A> Release() && {
Allocation<A> result = {GetData(), GetCapacity()};
Reset();
return result;
}
private:
void Reset() {
GetData() = nullptr;
GetCapacity() = 0;
}
container_internal::CompressedTuple<A, Pointer<A>> allocator_data_;
SizeType<A> capacity_;
};
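// ConstructionTransaction: RAII owner of a range of constructed elements; the
// destructor destroys them unless Commit() is called first.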
template <typename A>
class ConstructionTransaction {
public:
explicit ConstructionTransaction(A& allocator)
: allocator_data_(allocator, nullptr), size_(0) {}
~ConstructionTransaction() {
if (DidConstruct()) {
DestroyAdapter<A>::DestroyElements(GetAllocator(), GetData(), GetSize());
}
}
ConstructionTransaction(const ConstructionTransaction&) = delete;
void operator=(const ConstructionTransaction&) = delete;
A& GetAllocator() { return allocator_data_.template get<0>(); }
Pointer<A>& GetData() { return allocator_data_.template get<1>(); }
SizeType<A>& GetSize() { return size_; }
bool DidConstruct() { return GetData() != nullptr; }
template <typename ValueAdapter>
void Construct(Pointer<A> data, ValueAdapter& values, SizeType<A> size) {
ConstructElements<A>(GetAllocator(), data, values, size);
GetData() = data;
GetSize() = size;
}
void Commit() && {
GetData() = nullptr;
GetSize() = 0;
}
private:
container_internal::CompressedTuple<A, Pointer<A>> allocator_data_;
SizeType<A> size_;
};
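// Storage: the state shared by all InlinedVector instantiations. Elements
// live either in the inlined buffer or in a heap allocation; the low bit of
// the combined size word records which representation is active, and the
// remaining bits hold the size.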
template <typename T, size_t N, typename A>
class Storage {
public:
struct MemcpyPolicy {};
struct ElementwiseAssignPolicy {};
struct ElementwiseSwapPolicy {};
struct ElementwiseConstructPolicy {};
using MoveAssignmentPolicy = absl::conditional_t<
absl::conjunction<absl::is_trivially_move_assignable<ValueType<A>>,
absl::is_trivially_destructible<ValueType<A>>,
std::is_same<A, std::allocator<ValueType<A>>>>::value,
MemcpyPolicy,
absl::conditional_t<IsMoveAssignOk<A>::value, ElementwiseAssignPolicy,
ElementwiseConstructPolicy>>;
using SwapInlinedElementsPolicy = absl::conditional_t<
absl::conjunction<absl::is_trivially_relocatable<ValueType<A>>,
std::is_same<A, std::allocator<ValueType<A>>>>::value,
MemcpyPolicy,
absl::conditional_t<IsSwapOk<A>::value, ElementwiseSwapPolicy,
ElementwiseConstructPolicy>>;
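  // Growth policy: capacity doubles on reallocation, but never falls below
  // the explicitly requested capacity.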
static SizeType<A> NextCapacity(SizeType<A> current_capacity) {
return current_capacity * 2;
}
static SizeType<A> ComputeCapacity(SizeType<A> current_capacity,
SizeType<A> requested_capacity) {
return (std::max)(NextCapacity(current_capacity), requested_capacity);
}
Storage() : metadata_(A(), 0u) {}
explicit Storage(const A& allocator)
: metadata_(allocator, 0u) {}
~Storage() {
if (GetSizeAndIsAllocated() == 0) {
return;
}
if (absl::is_trivially_destructible<ValueType<A>>::value &&
std::is_same<A, std::allocator<ValueType<A>>>::value) {
DeallocateIfAllocated();
return;
}
DestroyContents();
}
SizeType<A>& GetSizeAndIsAllocated() { return metadata_.template get<1>(); }
const SizeType<A>& GetSizeAndIsAllocated() const {
return metadata_.template get<1>();
}
SizeType<A> GetSize() const { return GetSizeAndIsAllocated() >> 1; }
bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; }
Pointer<A> GetAllocatedData() {
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
return data_.allocated.allocated_data;
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
#pragma GCC diagnostic pop
#endif
}
ConstPointer<A> GetAllocatedData() const {
return data_.allocated.allocated_data;
}
ABSL_ATTRIBUTE_NO_SANITIZE_CFI Pointer<A> GetInlinedData() {
return reinterpret_cast<Pointer<A>>(data_.inlined.inlined_data);
}
ABSL_ATTRIBUTE_NO_SANITIZE_CFI ConstPointer<A> GetInlinedData() const {
return reinterpret_cast<ConstPointer<A>>(data_.inlined.inlined_data);
}
SizeType<A> GetAllocatedCapacity() const {
return data_.allocated.allocated_capacity;
}
SizeType<A> GetInlinedCapacity() const {
return static_cast<SizeType<A>>(kOptimalInlinedSize);
}
StorageView<A> MakeStorageView() {
return GetIsAllocated() ? StorageView<A>{GetAllocatedData(), GetSize(),
GetAllocatedCapacity()}
: StorageView<A>{GetInlinedData(), GetSize(),
GetInlinedCapacity()};
}
A& GetAllocator() { return metadata_.template get<0>(); }
const A& GetAllocator() const { return metadata_.template get<0>(); }
ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other);
template <typename ValueAdapter>
void Initialize(ValueAdapter values, SizeType<A> new_size);
template <typename ValueAdapter>
void Assign(ValueAdapter values, SizeType<A> new_size);
template <typename ValueAdapter>
void Resize(ValueAdapter values, SizeType<A> new_size);
template <typename ValueAdapter>
Iterator<A> Insert(ConstIterator<A> pos, ValueAdapter values,
SizeType<A> insert_count);
template <typename... Args>
Reference<A> EmplaceBack(Args&&... args);
Iterator<A> Erase(ConstIterator<A> from, ConstIterator<A> to);
void Reserve(SizeType<A> requested_capacity);
void ShrinkToFit();
void Swap(Storage* other_storage_ptr);
void SetIsAllocated() {
GetSizeAndIsAllocated() |= static_cast<SizeType<A>>(1);
}
void UnsetIsAllocated() {
GetSizeAndIsAllocated() &= ((std::numeric_limits<SizeType<A>>::max)() - 1);
}
void SetSize(SizeType<A> size) {
GetSizeAndIsAllocated() =
(size << 1) | static_cast<SizeType<A>>(GetIsAllocated());
}
void SetAllocatedSize(SizeType<A> size) {
GetSizeAndIsAllocated() = (size << 1) | static_cast<SizeType<A>>(1);
}
void SetInlinedSize(SizeType<A> size) {
GetSizeAndIsAllocated() = size << static_cast<SizeType<A>>(1);
}
void AddSize(SizeType<A> count) {
GetSizeAndIsAllocated() += count << static_cast<SizeType<A>>(1);
}
void SubtractSize(SizeType<A> count) {
ABSL_HARDENING_ASSERT(count <= GetSize());
GetSizeAndIsAllocated() -= count << static_cast<SizeType<A>>(1);
}
void SetAllocation(Allocation<A> allocation) {
data_.allocated.allocated_data = allocation.data;
data_.allocated.allocated_capacity = allocation.capacity;
}
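  // MemcpyFrom: bitwise copy of the size word and the data union. The assert
  // below encodes when this is safe: either the source is heap-allocated (so
  // only the pointer and capacity are copied) or the value type can be copied
  // byte-for-byte.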
void MemcpyFrom(const Storage& other_storage) {
{
using V = ValueType<A>;
ABSL_HARDENING_ASSERT(
other_storage.GetIsAllocated() ||
(std::is_same<A, std::allocator<V>>::value &&
(
absl::is_trivially_relocatable<V>::value ||
(absl::is_trivially_move_assignable<V>::value &&
absl::is_trivially_destructible<V>::value) ||
(absl::is_trivially_copy_constructible<V>::value ||
absl::is_trivially_copy_assignable<V>::value))));
}
GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated();
data_ = other_storage.data_;
}
void DeallocateIfAllocated() {
if (GetIsAllocated()) {
MallocAdapter<A>::Deallocate(GetAllocator(), GetAllocatedData(),
GetAllocatedCapacity());
}
}
private:
ABSL_ATTRIBUTE_NOINLINE void DestroyContents();
using Metadata = container_internal::CompressedTuple<A, SizeType<A>>;
struct Allocated {
Pointer<A> allocated_data;
SizeType<A> allocated_capacity;
};
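  // The inline buffer holds at least N elements and at least enough bytes for
  // the Allocated handle, so the inline arm of the union below is never
  // smaller than the allocated arm.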
static constexpr size_t kOptimalInlinedSize =
(std::max)(N, sizeof(Allocated) / sizeof(ValueType<A>));
struct Inlined {
alignas(ValueType<A>) char inlined_data[sizeof(
ValueType<A>[kOptimalInlinedSize])];
};
union Data {
Allocated allocated;
Inlined inlined;
};
void SwapN(ElementwiseSwapPolicy, Storage* other, SizeType<A> n);
void SwapN(ElementwiseConstructPolicy, Storage* other, SizeType<A> n);
void SwapInlinedElements(MemcpyPolicy, Storage* other);
template <typename NotMemcpyPolicy>
void SwapInlinedElements(NotMemcpyPolicy, Storage* other);
template <typename... Args>
ABSL_ATTRIBUTE_NOINLINE Reference<A> EmplaceBackSlow(Args&&... args);
Metadata metadata_;
Data data_;
};
template <typename T, size_t N, typename A>
void Storage<T, N, A>::DestroyContents() {
Pointer<A> data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
DestroyAdapter<A>::DestroyElements(GetAllocator(), data, GetSize());
DeallocateIfAllocated();
}
template <typename T, size_t N, typename A>
void Storage<T, N, A>::InitFrom(const Storage& other) {
const SizeType<A> n = other.GetSize();
ABSL_HARDENING_ASSERT(n > 0);
ConstPointer<A> src;
Pointer<A> dst;
if (!other.GetIsAllocated()) {
dst = GetInlinedData();
src = other.GetInlinedData();
} else {
SizeType<A> requested_capacity = ComputeCapacity(GetInlinedCapacity(), n);
Allocation<A> allocation =
MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
SetAllocation(allocation);
dst = allocation.data;
src = other.GetAllocatedData();
}
if (absl::is_trivially_copy_constructible<ValueType<A>>::value &&
std::is_same<A, std::allocator<ValueType<A>>>::value) {
std::memcpy(reinterpret_cast<char*>(dst),
reinterpret_cast<const char*>(src), n * sizeof(ValueType<A>));
} else {
auto values = IteratorValueAdapter<A, ConstPointer<A>>(src);
ConstructElements<A>(GetAllocator(), dst, values, n);
}
GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated();
}
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
auto Storage<T, N, A>::Initialize(ValueAdapter values,
SizeType<A> new_size) -> void {
ABSL_HARDENING_ASSERT(!GetIsAllocated());
ABSL_HARDENING_ASSERT(GetSize() == 0);
Pointer<A> construct_data;
if (new_size > GetInlinedCapacity()) {
SizeType<A> requested_capacity =
ComputeCapacity(GetInlinedCapacity(), new_size);
Allocation<A> allocation =
MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
construct_data = allocation.data;
SetAllocation(allocation);
SetIsAllocated();
} else {
construct_data = GetInlinedData();
}
ConstructElements<A>(GetAllocator(), construct_data, values, new_size);
AddSize(new_size);
}
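// Assign: reuses existing elements by assignment where possible, constructs
// any additional elements, and destroys any surplus; a new allocation is made
// only when new_size exceeds the current capacity.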
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
auto Storage<T, N, A>::Assign(ValueAdapter values,
SizeType<A> new_size) -> void {
StorageView<A> storage_view = MakeStorageView();
AllocationTransaction<A> allocation_tx(GetAllocator());
absl::Span<ValueType<A>> assign_loop;
absl::Span<ValueType<A>> construct_loop;
absl::Span<ValueType<A>> destroy_loop;
if (new_size > storage_view.capacity) {
SizeType<A> requested_capacity =
ComputeCapacity(storage_view.capacity, new_size);
construct_loop = {allocation_tx.Allocate(requested_capacity), new_size};
destroy_loop = {storage_view.data, storage_view.size};
} else if (new_size > storage_view.size) {
assign_loop = {storage_view.data, storage_view.size};
construct_loop = {storage_view.data + storage_view.size,
new_size - storage_view.size};
} else {
assign_loop = {storage_view.data, new_size};
destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
}
AssignElements<A>(assign_loop.data(), values, assign_loop.size());
ConstructElements<A>(GetAllocator(), construct_loop.data(), values,
construct_loop.size());
DestroyAdapter<A>::DestroyElements(GetAllocator(), destroy_loop.data(),
destroy_loop.size());
if (allocation_tx.DidAllocate()) {
DeallocateIfAllocated();
SetAllocation(std::move(allocation_tx).Release());
SetIsAllocated();
}
SetSize(new_size);
}
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
auto Storage<T, N, A>::Resize(ValueAdapter values,
SizeType<A> new_size) -> void {
StorageView<A> storage_view = MakeStorageView();
Pointer<A> const base = storage_view.data;
const SizeType<A> size = storage_view.size;
A& alloc = GetAllocator();
if (new_size <= size) {
DestroyAdapter<A>::DestroyElements(alloc, base + new_size, size - new_size);
} else if (new_size <= storage_view.capacity) {
ConstructElements<A>(alloc, base + size, values, new_size - size);
} else {
AllocationTransaction<A> allocation_tx(alloc);
SizeType<A> requested_capacity =
ComputeCapacity(storage_view.capacity, new_size);
Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);
ConstructionTransaction<A> construction_tx(alloc);
construction_tx.Construct(new_data + size, values, new_size - size);
IteratorValueAdapter<A, MoveIterator<A>> move_values(
(MoveIterator<A>(base)));
ConstructElements<A>(alloc, new_data, move_values, size);
DestroyAdapter<A>::DestroyElements(alloc, base, size);
std::move(construction_tx).Commit();
DeallocateIfAllocated();
SetAllocation(std::move(allocation_tx).Release());
SetIsAllocated();
}
SetSize(new_size);
}
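// Insert: if the new size exceeds the current capacity, the result is built
// in a fresh allocation (moved prefix, inserted values, moved suffix).
// Otherwise the tail is shifted right in place -- move-constructing into
// uninitialized slots and move-assigning the rest -- before the inserted
// values are written.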
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
auto Storage<T, N, A>::Insert(ConstIterator<A> pos, ValueAdapter values,
SizeType<A> insert_count) -> Iterator<A> {
StorageView<A> storage_view = MakeStorageView();
auto insert_index = static_cast<SizeType<A>>(
std::distance(ConstIterator<A>(storage_view.data), pos));
SizeType<A> insert_end_index = insert_index + insert_count;
SizeType<A> new_size = storage_view.size + insert_count;
if (new_size > storage_view.capacity) {
AllocationTransaction<A> allocation_tx(GetAllocator());
ConstructionTransaction<A> construction_tx(GetAllocator());
ConstructionTransaction<A> move_construction_tx(GetAllocator());
IteratorValueAdapter<A, MoveIterator<A>> move_values(
MoveIterator<A>(storage_view.data));
SizeType<A> requested_capacity =
ComputeCapacity(storage_view.capacity, new_size);
Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);
construction_tx.Construct(new_data + insert_index, values, insert_count);
move_construction_tx.Construct(new_data, move_values, insert_index);
ConstructElements<A>(GetAllocator(), new_data + insert_end_index,
move_values, storage_view.size - insert_index);
DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
storage_view.size);
std::move(construction_tx).Commit();
std::move(move_construction_tx).Commit();
DeallocateIfAllocated();
SetAllocation(std::move(allocation_tx).Release());
SetAllocatedSize(new_size);
return Iterator<A>(new_data + insert_index);
} else {
SizeType<A> move_construction_destination_index =
(std::max)(insert_end_index, storage_view.size);
ConstructionTransaction<A> move_construction_tx(GetAllocator());
IteratorValueAdapter<A, MoveIterator<A>> move_construction_values(
MoveIterator<A>(storage_view.data +
(move_construction_destination_index - insert_count)));
absl::Span<ValueType<A>> move_construction = {
storage_view.data + move_construction_destination_index,
new_size - move_construction_destination_index};
Pointer<A> move_assignment_values = storage_view.data + insert_index;
absl::Span<ValueType<A>> move_assignment = {
storage_view.data + insert_end_index,
move_construction_destination_index - insert_end_index};
absl::Span<ValueType<A>> insert_assignment = {move_assignment_values,
move_construction.size()};
absl::Span<ValueType<A>> insert_construction = {
insert_assignment.data() + insert_assignment.size(),
insert_count - insert_assignment.size()};
move_construction_tx.Construct(move_construction.data(),
move_construction_values,
move_construction.size());
for (Pointer<A>
destination = move_assignment.data() + move_assignment.size(),
last_destination = move_assignment.data(),
source = move_assignment_values + move_assignment.size();
;) {
--destination;
--source;
if (destination < last_destination) break;
*destination = std::move(*source);
}
AssignElements<A>(insert_assignment.data(), values,
insert_assignment.size());
ConstructElements<A>(GetAllocator(), insert_construction.data(), values,
insert_construction.size());
std::move(move_construction_tx).Commit();
AddSize(insert_count);
return Iterator<A>(storage_view.data + insert_index);
}
}
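// EmplaceBack: the fast path constructs in the remaining capacity; the slow
// path reallocates, constructs the new element first, then moves the existing
// elements into the new buffer.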
template <typename T, size_t N, typename A>
template <typename... Args>
auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> Reference<A> {
StorageView<A> storage_view = MakeStorageView();
const SizeType<A> n = storage_view.size;
if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) {
Pointer<A> last_ptr = storage_view.data + n;
AllocatorTraits<A>::construct(GetAllocator(), last_ptr,
std::forward<Args>(args)...);
AddSize(1);
return *last_ptr;
}
return EmplaceBackSlow(std::forward<Args>(args)...);
}
template <typename T, size_t N, typename A>
template <typename... Args>
auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
StorageView<A> storage_view = MakeStorageView();
AllocationTransaction<A> allocation_tx(GetAllocator());
IteratorValueAdapter<A, MoveIterator<A>> move_values(
MoveIterator<A>(storage_view.data));
SizeType<A> requested_capacity = NextCapacity(storage_view.capacity);
Pointer<A> construct_data = allocation_tx.Allocate(requested_capacity);
Pointer<A> last_ptr = construct_data + storage_view.size;
AllocatorTraits<A>::construct(GetAllocator(), last_ptr,
std::forward<Args>(args)...);
ABSL_INTERNAL_TRY {
ConstructElements<A>(GetAllocator(), allocation_tx.GetData(), move_values,
storage_view.size);
}
ABSL_INTERNAL_CATCH_ANY {
AllocatorTraits<A>::destroy(GetAllocator(), last_ptr);
ABSL_INTERNAL_RETHROW;
}
DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
storage_view.size);
DeallocateIfAllocated();
SetAllocation(std::move(allocation_tx).Release());
SetIsAllocated();
AddSize(1);
return *last_ptr;
}
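// Erase: trivially relocatable elements are compacted with memmove after the
// erased range is destroyed; otherwise the tail is move-assigned down and the
// now-unused slots at the end are destroyed.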
template <typename T, size_t N, typename A>
auto Storage<T, N, A>::Erase(ConstIterator<A> from,
ConstIterator<A> to) -> Iterator<A> {
StorageView<A> storage_view = MakeStorageView();
auto erase_size = static_cast<SizeType<A>>(std::distance(from, to));
auto erase_index = static_cast<SizeType<A>>(
std::distance(ConstIterator<A>(storage_view.data), from));
SizeType<A> erase_end_index = erase_index + erase_size;
if (absl::is_trivially_relocatable<ValueType<A>>::value &&
std::is_nothrow_destructible<ValueType<A>>::value &&
std::is_same<A, std::allocator<ValueType<A>>>::value) {
DestroyAdapter<A>::DestroyElements(
GetAllocator(), storage_view.data + erase_index, erase_size);
std::memmove(
reinterpret_cast<char*>(storage_view.data + erase_index),
reinterpret_cast<const char*>(storage_view.data + erase_end_index),
(storage_view.size - erase_end_index) * sizeof(ValueType<A>));
} else {
IteratorValueAdapter<A, MoveIterator<A>> move_values(
MoveIterator<A>(storage_view.data + erase_end_index));
AssignElements<A>(storage_view.data + erase_index, move_values,
storage_view.size - erase_end_index);
DestroyAdapter<A>::DestroyElements(
GetAllocator(), storage_view.data + (storage_view.size - erase_size),
erase_size);
}
SubtractSize(erase_size);
return Iterator<A>(storage_view.data + erase_index);
}
template <typename T, size_t N, typename A>
auto Storage<T, N, A>::Reserve(SizeType<A> requested_capacity) -> void {
StorageView<A> storage_view = MakeStorageView();
if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) return;
AllocationTransaction<A> allocation_tx(GetAllocator());
IteratorValueAdapter<A, MoveIterator<A>> move_values(
MoveIterator<A>(storage_view.data));
SizeType<A> new_requested_capacity =
ComputeCapacity(storage_view.capacity, requested_capacity);
Pointer<A> new_data = allocation_tx.Allocate(new_requested_capacity);
ConstructElements<A>(GetAllocator(), new_data, move_values,
storage_view.size);
DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
storage_view.size);
DeallocateIfAllocated();
SetAllocation(std::move(allocation_tx).Release());
SetIsAllocated();
}
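// ShrinkToFit: only valid on allocated storage. Moves the elements into the
// inlined buffer when they fit, or into a smaller heap allocation otherwise;
// returns early if no smaller allocation is available.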
template <typename T, size_t N, typename A>
auto Storage<T, N, A>::ShrinkToFit() -> void {
ABSL_HARDENING_ASSERT(GetIsAllocated());
StorageView<A> storage_view{GetAllocatedData(), GetSize(),
GetAllocatedCapacity()};
if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) return;
AllocationTransaction<A> allocation_tx(GetAllocator());
IteratorValueAdapter<A, MoveIterator<A>> move_values(
MoveIterator<A>(storage_view.data));
Pointer<A> construct_data;
if (storage_view.size > GetInlinedCapacity()) {
SizeType<A> requested_capacity = storage_view.size;
construct_data = allocation_tx.Allocate(requested_capacity);
if (allocation_tx.GetCapacity() >= storage_view.capacity) {
return;
}
} else {
construct_data = GetInlinedData();
}
ABSL_INTERNAL_TRY {
ConstructElements<A>(GetAllocator(), construct_data, move_values,
storage_view.size);
}
ABSL_INTERNAL_CATCH_ANY {
SetAllocation({storage_view.data, storage_view.capacity});
ABSL_INTERNAL_RETHROW;
}
DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
storage_view.size);
MallocAdapter<A>::Deallocate(GetAllocator(), storage_view.data,
storage_view.capacity);
if (allocation_tx.DidAllocate()) {
SetAllocation(std::move(allocation_tx).Release());
} else {
UnsetIsAllocated();
}
}
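// Swap: swaps the heap pointers when both sides are allocated, swaps elements
// in place when both are inlined, and in the mixed case moves the inlined
// elements into the other side's inline buffer and transfers ownership of the
// heap allocation.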
template <typename T, size_t N, typename A>
auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
using std::swap;
ABSL_HARDENING_ASSERT(this != other_storage_ptr);
if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) {
swap(data_.allocated, other_storage_ptr->data_.allocated);
} else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) {
SwapInlinedElements(SwapInlinedElementsPolicy{}, other_storage_ptr);
} else {
Storage* allocated_ptr = this;
Storage* inlined_ptr = other_storage_ptr;
if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr);
StorageView<A> allocated_storage_view{
allocated_ptr->GetAllocatedData(), allocated_ptr->GetSize(),
allocated_ptr->GetAllocatedCapacity()};
IteratorValueAdapter<A, MoveIterator<A>> move_values(
MoveIterator<A>(inlined_ptr->GetInlinedData()));
ABSL_INTERNAL_TRY {
ConstructElements<A>(inlined_ptr->GetAllocator(),
allocated_ptr->GetInlinedData(), move_values,
inlined_ptr->GetSize());
}
ABSL_INTERNAL_CATCH_ANY {
allocated_ptr->SetAllocation(Allocation<A>{
allocated_storage_view.data, allocated_storage_view.capacity});
ABSL_INTERNAL_RETHROW;
}
DestroyAdapter<A>::DestroyElements(inlined_ptr->GetAllocator(),
inlined_ptr->GetInlinedData(),
inlined_ptr->GetSize());
inlined_ptr->SetAllocation(Allocation<A>{allocated_storage_view.data,
allocated_storage_view.capacity});
}
swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated());
swap(GetAllocator(), other_storage_ptr->GetAllocator());
}
template <typename T, size_t N, typename A>
void Storage<T, N, A>::SwapN(ElementwiseSwapPolicy, Storage* other,
SizeType<A> n) {
std::swap_ranges(GetInlinedData(), GetInlinedData() + n,
other->GetInlinedData());
}
template <typename T, size_t N, typename A>
void Storage<T, N, A>::SwapN(ElementwiseConstructPolicy, Storage* other,
SizeType<A> n) {
Pointer<A> a = GetInlinedData();
Pointer<A> b = other->GetInlinedData();
A& allocator_a = GetAllocator();
A& allocator_b = other->GetAllocator();
for (SizeType<A> i = 0; i < n; ++i, ++a, ++b) {
ValueType<A> tmp(std::move(*a));
AllocatorTraits<A>::destroy(allocator_a, a);
AllocatorTraits<A>::construct(allocator_b, a, std::move(*b));
AllocatorTraits<A>::destroy(allocator_b, b);
AllocatorTraits<A>::construct(allocator_a, b, std::move(tmp));
}
}
template <typename T, size_t N, typename A>
void Storage<T, N, A>::SwapInlinedElements(MemcpyPolicy, Storage* other) {
Data tmp = data_;
data_ = other->data_;
other->data_ = tmp;
}
template <typename T, size_t N, typename A>
template <typename NotMemcpyPolicy>
void Storage<T, N, A>::SwapInlinedElements(NotMemcpyPolicy policy,
Storage* other) {
Storage* small_ptr = this;
Storage* large_ptr = other;
if (small_ptr->GetSize() > large_ptr->GetSize()) {
std::swap(small_ptr, large_ptr);
}
auto small_size = small_ptr->GetSize();
auto diff = large_ptr->GetSize() - small_size;
SwapN(policy, other, small_size);
IteratorValueAdapter<A, MoveIterator<A>> move_values(
MoveIterator<A>(large_ptr->GetInlinedData() + small_size));
ConstructElements<A>(large_ptr->GetAllocator(),
small_ptr->GetInlinedData() + small_size, move_values,
diff);
DestroyAdapter<A>::DestroyElements(large_ptr->GetAllocator(),
large_ptr->GetInlinedData() + small_size,
diff);
}
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/container/inlined_vector.h"
#include <algorithm>
#include <cstddef>
#include <forward_list>
#include <iterator>
#include <list>
#include <memory>
#include <scoped_allocator>
#include <sstream>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/internal/exception_testing.h"
#include "absl/base/macros.h"
#include "absl/base/options.h"
#include "absl/container/internal/test_allocator.h"
#include "absl/container/internal/test_instance_tracker.h"
#include "absl/hash/hash_testing.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
namespace {
using absl::container_internal::CountingAllocator;
using absl::test_internal::CopyableMovableInstance;
using absl::test_internal::CopyableOnlyInstance;
using absl::test_internal::InstanceTracker;
using testing::AllOf;
using testing::Each;
using testing::ElementsAre;
using testing::ElementsAreArray;
using testing::Eq;
using testing::Gt;
using testing::Pointee;
using testing::Pointwise;
using testing::PrintToString;
using testing::SizeIs;
using IntVec = absl::InlinedVector<int, 8>;
MATCHER_P(CapacityIs, n, "") {
return testing::ExplainMatchResult(n, arg.capacity(), result_listener);
}
MATCHER_P(ValueIs, e, "") {
return testing::ExplainMatchResult(e, arg.value(), result_listener);
}
template <typename T>
class InstanceTest : public ::testing::Test {};
TYPED_TEST_SUITE_P(InstanceTest);
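// RefCounted: increments an external counter on construction/copy and
// decrements it on destruction, letting the tests verify that erase/insert
// destroy exactly the expected elements.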
class RefCounted {
public:
RefCounted(int value, int* count) : value_(value), count_(count) { Ref(); }
RefCounted(const RefCounted& v) : value_(v.value_), count_(v.count_) {
Ref();
}
~RefCounted() {
Unref();
count_ = nullptr;
}
friend void swap(RefCounted& a, RefCounted& b) {
using std::swap;
swap(a.value_, b.value_);
swap(a.count_, b.count_);
}
RefCounted& operator=(RefCounted v) {
using std::swap;
swap(*this, v);
return *this;
}
void Ref() const {
CHECK_NE(count_, nullptr);
++(*count_);
}
void Unref() const {
--(*count_);
CHECK_GE(*count_, 0);
}
int value_;
int* count_;
};
using RefCountedVec = absl::InlinedVector<RefCounted, 8>;
class Dynamic {
public:
virtual ~Dynamic() {}
};
using DynamicVec = absl::InlinedVector<Dynamic, 8>;
template <typename Container>
static void Fill(Container* v, size_t len, int offset = 0) {
for (size_t i = 0; i < len; i++) {
v->push_back(static_cast<int>(i) + offset);
}
}
static IntVec Fill(size_t len, int offset = 0) {
IntVec v;
Fill(&v, len, offset);
return v;
}
TEST(IntVec, SimpleOps) {
for (size_t len = 0; len < 20; len++) {
IntVec v;
const IntVec& cv = v;
Fill(&v, len);
EXPECT_EQ(len, v.size());
EXPECT_LE(len, v.capacity());
for (size_t i = 0; i < len; i++) {
EXPECT_EQ(static_cast<int>(i), v[i]);
EXPECT_EQ(static_cast<int>(i), v.at(i));
}
EXPECT_EQ(v.begin(), v.data());
EXPECT_EQ(cv.begin(), cv.data());
size_t counter = 0;
for (IntVec::iterator iter = v.begin(); iter != v.end(); ++iter) {
EXPECT_EQ(static_cast<int>(counter), *iter);
counter++;
}
EXPECT_EQ(counter, len);
counter = 0;
for (IntVec::const_iterator iter = v.begin(); iter != v.end(); ++iter) {
EXPECT_EQ(static_cast<int>(counter), *iter);
counter++;
}
EXPECT_EQ(counter, len);
counter = 0;
for (IntVec::const_iterator iter = v.cbegin(); iter != v.cend(); ++iter) {
EXPECT_EQ(static_cast<int>(counter), *iter);
counter++;
}
EXPECT_EQ(counter, len);
if (len > 0) {
EXPECT_EQ(0, v.front());
EXPECT_EQ(static_cast<int>(len - 1), v.back());
v.pop_back();
EXPECT_EQ(len - 1, v.size());
for (size_t i = 0; i < v.size(); ++i) {
EXPECT_EQ(static_cast<int>(i), v[i]);
EXPECT_EQ(static_cast<int>(i), v.at(i));
}
}
}
}
TEST(IntVec, PopBackNoOverflow) {
IntVec v = {1};
v.pop_back();
EXPECT_EQ(v.size(), 0u);
}
TEST(IntVec, AtThrows) {
IntVec v = {1, 2, 3};
EXPECT_EQ(v.at(2), 3);
ABSL_BASE_INTERNAL_EXPECT_FAIL(v.at(3), std::out_of_range,
"failed bounds check");
}
TEST(IntVec, ReverseIterator) {
for (size_t len = 0; len < 20; len++) {
IntVec v;
Fill(&v, len);
size_t counter = len;
for (IntVec::reverse_iterator iter = v.rbegin(); iter != v.rend(); ++iter) {
counter--;
EXPECT_EQ(static_cast<int>(counter), *iter);
}
EXPECT_EQ(counter, 0u);
counter = len;
for (IntVec::const_reverse_iterator iter = v.rbegin(); iter != v.rend();
++iter) {
counter--;
EXPECT_EQ(static_cast<int>(counter), *iter);
}
EXPECT_EQ(counter, 0u);
counter = len;
for (IntVec::const_reverse_iterator iter = v.crbegin(); iter != v.crend();
++iter) {
counter--;
EXPECT_EQ(static_cast<int>(counter), *iter);
}
EXPECT_EQ(counter, 0u);
}
}
TEST(IntVec, Erase) {
for (size_t len = 1; len < 20; len++) {
for (size_t i = 0; i < len; ++i) {
IntVec v;
Fill(&v, len);
v.erase(v.begin() + i);
EXPECT_EQ(len - 1, v.size());
for (size_t j = 0; j < i; ++j) {
EXPECT_EQ(static_cast<int>(j), v[j]);
}
for (size_t j = i; j < len - 1; ++j) {
EXPECT_EQ(static_cast<int>(j + 1), v[j]);
}
}
}
}
TEST(IntVec, Hardened) {
IntVec v;
Fill(&v, 10);
EXPECT_EQ(v[9], 9);
#if !defined(NDEBUG) || ABSL_OPTION_HARDENED
EXPECT_DEATH_IF_SUPPORTED(v[10], "");
EXPECT_DEATH_IF_SUPPORTED(v[static_cast<size_t>(-1)], "");
EXPECT_DEATH_IF_SUPPORTED(v.resize(v.max_size() + 1), "");
#endif
}
TEST(UniquePtr, MoveConstruct) {
for (size_t size = 0; size < 16; ++size) {
SCOPED_TRACE(size);
absl::InlinedVector<std::unique_ptr<size_t>, 2> a;
for (size_t i = 0; i < size; ++i) {
a.push_back(std::make_unique<size_t>(i));
}
absl::InlinedVector<std::unique_ptr<size_t>, 2> b(std::move(a));
ASSERT_THAT(b, SizeIs(size));
for (size_t i = 0; i < size; ++i) {
ASSERT_THAT(b[i], Pointee(i));
}
}
}
TEST(UniquePtr, MoveAssign) {
for (size_t size = 0; size < 16; ++size) {
SCOPED_TRACE(size);
absl::InlinedVector<std::unique_ptr<size_t>, 2> a;
for (size_t i = 0; i < size; ++i) {
a.push_back(std::make_unique<size_t>(i));
}
absl::InlinedVector<std::unique_ptr<size_t>, 2> b;
b = std::move(a);
ASSERT_THAT(b, SizeIs(size));
for (size_t i = 0; i < size; ++i) {
ASSERT_THAT(b[i], Pointee(i));
}
}
}
TEST(UniquePtr, Swap) {
for (size_t size1 = 0; size1 < 5; ++size1) {
for (size_t size2 = 0; size2 < 5; ++size2) {
absl::InlinedVector<std::unique_ptr<size_t>, 2> a;
absl::InlinedVector<std::unique_ptr<size_t>, 2> b;
for (size_t i = 0; i < size1; ++i) {
a.push_back(std::make_unique<size_t>(i + 10));
}
for (size_t i = 0; i < size2; ++i) {
b.push_back(std::make_unique<size_t>(i + 20));
}
a.swap(b);
ASSERT_THAT(a, SizeIs(size2));
ASSERT_THAT(b, SizeIs(size1));
for (size_t i = 0; i < a.size(); ++i) {
ASSERT_THAT(a[i], Pointee(i + 20));
}
for (size_t i = 0; i < b.size(); ++i) {
ASSERT_THAT(b[i], Pointee(i + 10));
}
}
}
}
TEST(UniquePtr, EraseSingle) {
for (size_t size = 4; size < 16; ++size) {
absl::InlinedVector<std::unique_ptr<size_t>, 8> a;
for (size_t i = 0; i < size; ++i) {
a.push_back(std::make_unique<size_t>(i));
}
a.erase(a.begin());
ASSERT_THAT(a, SizeIs(size - 1));
for (size_t i = 0; i < size - 1; ++i) {
ASSERT_THAT(a[i], Pointee(i + 1));
}
a.erase(a.begin() + 2);
ASSERT_THAT(a, SizeIs(size - 2));
ASSERT_THAT(a[0], Pointee(1));
ASSERT_THAT(a[1], Pointee(2));
for (size_t i = 2; i < size - 2; ++i) {
ASSERT_THAT(a[i], Pointee(i + 2));
}
}
}
TEST(UniquePtr, EraseMulti) {
for (size_t size = 5; size < 16; ++size) {
absl::InlinedVector<std::unique_ptr<size_t>, 8> a;
for (size_t i = 0; i < size; ++i) {
a.push_back(std::make_unique<size_t>(i));
}
a.erase(a.begin(), a.begin() + 2);
ASSERT_THAT(a, SizeIs(size - 2));
for (size_t i = 0; i < size - 2; ++i) {
ASSERT_THAT(a[i], Pointee(i + 2));
}
a.erase(a.begin() + 1, a.begin() + 3);
ASSERT_THAT(a, SizeIs(size - 4));
ASSERT_THAT(a[0], Pointee(2));
for (size_t i = 1; i < size - 4; ++i) {
ASSERT_THAT(a[i], Pointee(i + 4));
}
}
}
TEST(RefCountedVec, EraseBeginEnd) {
for (size_t len = 1; len < 20; ++len) {
for (size_t erase_begin = 0; erase_begin < len; ++erase_begin) {
for (size_t erase_end = erase_begin; erase_end <= len; ++erase_end) {
std::vector<int> counts(len, 0);
RefCountedVec v;
for (size_t i = 0; i < len; ++i) {
v.push_back(RefCounted(static_cast<int>(i), &counts[i]));
}
size_t erase_len = erase_end - erase_begin;
v.erase(v.begin() + erase_begin, v.begin() + erase_end);
EXPECT_EQ(len - erase_len, v.size());
for (size_t i = 0; i < erase_begin; ++i) {
EXPECT_EQ(static_cast<int>(i), v[i].value_);
}
for (size_t i = erase_begin; i < v.size(); ++i) {
EXPECT_EQ(static_cast<int>(i + erase_len), v[i].value_);
}
for (size_t i = 0; i < erase_begin; ++i) {
EXPECT_EQ(1, counts[i]);
}
for (size_t i = erase_begin; i < erase_end; ++i) {
EXPECT_EQ(0, counts[i]);
}
for (size_t i = erase_end; i < len; ++i) {
EXPECT_EQ(1, counts[i]);
}
}
}
}
}
struct NoDefaultCtor {
explicit NoDefaultCtor(int) {}
};
struct NoCopy {
NoCopy() {}
NoCopy(const NoCopy&) = delete;
};
struct NoAssign {
NoAssign() {}
NoAssign& operator=(const NoAssign&) = delete;
};
struct MoveOnly {
MoveOnly() {}
MoveOnly(MoveOnly&&) = default;
MoveOnly& operator=(MoveOnly&&) = default;
};
TEST(InlinedVectorTest, NoDefaultCtor) {
absl::InlinedVector<NoDefaultCtor, 1> v(10, NoDefaultCtor(2));
(void)v;
}
TEST(InlinedVectorTest, NoCopy) {
absl::InlinedVector<NoCopy, 1> v(10);
(void)v;
}
TEST(InlinedVectorTest, NoAssign) {
absl::InlinedVector<NoAssign, 1> v(10);
(void)v;
}
TEST(InlinedVectorTest, MoveOnly) {
absl::InlinedVector<MoveOnly, 2> v;
v.push_back(MoveOnly{});
v.push_back(MoveOnly{});
v.push_back(MoveOnly{});
v.erase(v.begin());
v.push_back(MoveOnly{});
v.erase(v.begin(), v.begin() + 1);
v.insert(v.begin(), MoveOnly{});
v.emplace(v.begin());
v.emplace(v.begin(), MoveOnly{});
}
TEST(InlinedVectorTest, Noexcept) {
EXPECT_TRUE(std::is_nothrow_move_constructible<IntVec>::value);
EXPECT_TRUE((std::is_nothrow_move_constructible<
absl::InlinedVector<MoveOnly, 2>>::value));
struct MoveCanThrow {
MoveCanThrow(MoveCanThrow&&) {}
};
EXPECT_EQ(absl::default_allocator_is_nothrow::value,
(std::is_nothrow_move_constructible<
absl::InlinedVector<MoveCanThrow, 2>>::value));
}
TEST(InlinedVectorTest, EmplaceBack) {
absl::InlinedVector<std::pair<std::string, int>, 1> v;
auto& inlined_element = v.emplace_back("answer", 42);
EXPECT_EQ(&inlined_element, &v[0]);
EXPECT_EQ(inlined_element.first, "answer");
EXPECT_EQ(inlined_element.second, 42);
auto& allocated_element = v.emplace_back("taxicab", 1729);
EXPECT_EQ(&allocated_element, &v[1]);
EXPECT_EQ(allocated_element.first, "taxicab");
EXPECT_EQ(allocated_element.second, 1729);
}
TEST(InlinedVectorTest, ShrinkToFitGrowingVector) {
absl::InlinedVector<std::pair<std::string, int>, 1> v;
v.shrink_to_fit();
EXPECT_EQ(v.capacity(), 1u);
v.emplace_back("answer", 42);
v.shrink_to_fit();
EXPECT_EQ(v.capacity(), 1u);
v.emplace_back("taxicab", 1729);
EXPECT_GE(v.capacity(), 2u);
v.shrink_to_fit();
EXPECT_EQ(v.capacity(), 2u);
v.reserve(100);
EXPECT_GE(v.capacity(), 100u);
v.shrink_to_fit();
EXPECT_EQ(v.capacity(), 2u);
}
TEST(InlinedVectorTest, ShrinkToFitEdgeCases) {
{
absl::InlinedVector<std::pair<std::string, int>, 1> v;
v.emplace_back("answer", 42);
v.emplace_back("taxicab", 1729);
EXPECT_GE(v.capacity(), 2u);
v.pop_back();
v.shrink_to_fit();
EXPECT_EQ(v.capacity(), 1u);
EXPECT_EQ(v[0].first, "answer");
EXPECT_EQ(v[0].second, 42);
}
{
absl::InlinedVector<std::string, 2> v(100);
v.resize(0);
v.shrink_to_fit();
EXPECT_EQ(v.capacity(), 2u);
}
{
absl::InlinedVector<std::string, 2> v(100);
v.resize(1);
v.shrink_to_fit();
EXPECT_EQ(v.capacity(), 2u);
}
{
absl::InlinedVector<std::string, 2> v(100);
v.resize(2);
v.shrink_to_fit();
EXPECT_EQ(v.capacity(), 2u);
}
{
absl::InlinedVector<std::string, 2> v(100);
v.resize(3);
v.shrink_to_fit();
EXPECT_EQ(v.capacity(), 3u);
}
}
TEST(IntVec, Insert) {
for (size_t len = 0; len < 20; len++) {
for (ptrdiff_t pos = 0; pos <= static_cast<ptrdiff_t>(len); pos++) {
{
std::vector<int> std_v;
Fill(&std_v, len);
IntVec v;
Fill(&v, len);
std_v.insert(std_v.begin() + pos, 9999);
IntVec::iterator it = v.insert(v.cbegin() + pos, 9999);
EXPECT_THAT(v, ElementsAreArray(std_v));
EXPECT_EQ(it, v.cbegin() + pos);
}
{
std::vector<int> std_v;
Fill(&std_v, len);
IntVec v;
Fill(&v, len);
IntVec::size_type n = 5;
std_v.insert(std_v.begin() + pos, n, 9999);
IntVec::iterator it = v.insert(v.cbegin() + pos, n, 9999);
EXPECT_THAT(v, ElementsAreArray(std_v));
EXPECT_EQ(it, v.cbegin() + pos);
}
{
std::vector<int> std_v;
Fill(&std_v, len);
IntVec v;
Fill(&v, len);
const std::vector<int> input = {9999, 8888, 7777};
std_v.insert(std_v.begin() + pos, input.cbegin(), input.cend());
IntVec::iterator it =
v.insert(v.cbegin() + pos, input.cbegin(), input.cend());
EXPECT_THAT(v, ElementsAreArray(std_v));
EXPECT_EQ(it, v.cbegin() + pos);
}
{
std::vector<int> std_v;
Fill(&std_v, len);
IntVec v;
Fill(&v, len);
const std::forward_list<int> input = {9999, 8888, 7777};
std_v.insert(std_v.begin() + pos, input.cbegin(), input.cend());
IntVec::iterator it =
v.insert(v.cbegin() + pos, input.cbegin(), input.cend());
EXPECT_THAT(v, ElementsAreArray(std_v));
EXPECT_EQ(it, v.cbegin() + pos);
}
{
std::vector<int> std_v;
Fill(&std_v, len);
IntVec v;
Fill(&v, len);
std_v.insert(std_v.begin() + pos, {9999, 8888, 7777});
std::istringstream input("9999 8888 7777");
IntVec::iterator it =
v.insert(v.cbegin() + pos, std::istream_iterator<int>(input),
std::istream_iterator<int>());
EXPECT_THAT(v, ElementsAreArray(std_v));
EXPECT_EQ(it, v.cbegin() + pos);
}
{
std::vector<int> std_v;
Fill(&std_v, len);
IntVec v;
Fill(&v, len);
std_v.insert(std_v.begin() + pos, {9999, 8888});
IntVec::iterator it = v.insert(v.cbegin() + pos, {9999, 8888});
EXPECT_THAT(v, ElementsAreArray(std_v));
EXPECT_EQ(it, v.cbegin() + pos);
}
}
}
}
TEST(RefCountedVec, InsertConstructorDestructor) {
for (size_t len = 0; len < 20; len++) {
SCOPED_TRACE(len);
for (size_t pos = 0; pos <= len; pos++) {
SCOPED_TRACE(pos);
std::vector<int> counts(len, 0);
int inserted_count = 0;
RefCountedVec v;
for (size_t i = 0; i < len; ++i) {
SCOPED_TRACE(i);
v.push_back(RefCounted(static_cast<int>(i), &counts[i]));
}
EXPECT_THAT(counts, Each(Eq(1)));
RefCounted insert_element(9999, &inserted_count);
EXPECT_EQ(1, inserted_count);
v.insert(v.begin() + pos, insert_element);
EXPECT_EQ(2, inserted_count);
EXPECT_THAT(counts, Each(Eq(1)));
EXPECT_EQ(2, inserted_count);
}
}
}
TEST(IntVec, Resize) {
for (size_t len = 0; len < 20; len++) {
IntVec v;
Fill(&v, len);
static const int kResizeElem = 1000000;
for (size_t k = 0; k < 10; k++) {
v.resize(len + k, kResizeElem);
EXPECT_EQ(len + k, v.size());
EXPECT_LE(len + k, v.capacity());
for (size_t i = 0; i < len + k; i++) {
if (i < len) {
EXPECT_EQ(static_cast<int>(i), v[i]);
} else {
EXPECT_EQ(kResizeElem, v[i]);
}
}
v.resize(len, kResizeElem);
EXPECT_EQ(len, v.size());
EXPECT_LE(len, v.capacity());
for (size_t i = 0; i < len; i++) {
EXPECT_EQ(static_cast<int>(i), v[i]);
}
}
}
}
TEST(IntVec, InitWithLength) {
for (size_t len = 0; len < 20; len++) {
IntVec v(len, 7);
EXPECT_EQ(len, v.size());
EXPECT_LE(len, v.capacity());
for (size_t i = 0; i < len; i++) {
EXPECT_EQ(7, v[i]);
}
}
}
TEST(IntVec, CopyConstructorAndAssignment) {
for (size_t len = 0; len < 20; len++) {
IntVec v;
Fill(&v, len);
EXPECT_EQ(len, v.size());
EXPECT_LE(len, v.capacity());
IntVec v2(v);
EXPECT_TRUE(v == v2) << PrintToString(v) << PrintToString(v2);
for (size_t start_len = 0; start_len < 20; start_len++) {
IntVec v3;
Fill(&v3, start_len, 99);
v3 = v;
EXPECT_TRUE(v == v3) << PrintToString(v) << PrintToString(v3);
}
}
}
TEST(IntVec, AliasingCopyAssignment) {
for (size_t len = 0; len < 20; ++len) {
IntVec original;
Fill(&original, len);
IntVec dup = original;
dup = *&dup;
EXPECT_EQ(dup, original);
}
}
TEST(IntVec, MoveConstructorAndAssignment) {
for (size_t len = 0; len < 20; len++) {
IntVec v_in;
const size_t inlined_capacity = v_in.capacity();
Fill(&v_in, len);
EXPECT_EQ(len, v_in.size());
EXPECT_LE(len, v_in.capacity());
{
IntVec v_temp(v_in);
auto* old_data = v_temp.data();
IntVec v_out(std::move(v_temp));
EXPECT_TRUE(v_in == v_out) << PrintToString(v_in) << PrintToString(v_out);
if (v_in.size() > inlined_capacity) {
EXPECT_TRUE(v_out.data() == old_data);
} else {
EXPECT_FALSE(v_out.data() == old_data);
}
}
for (size_t start_len = 0; start_len < 20; start_len++) {
IntVec v_out;
Fill(&v_out, start_len, 99);
IntVec v_temp(v_in);
auto* old_data = v_temp.data();
v_out = std::move(v_temp);
EXPECT_TRUE(v_in == v_out) << PrintToString(v_in) << PrintToString(v_out);
if (v_in.size() > inlined_capacity) {
EXPECT_TRUE(v_out.data() == old_data);
} else {
EXPECT_FALSE(v_out.data() == old_data);
}
}
}
}
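// NotTriviallyDestructible: owns a heap-allocated int, so copies are
// observable and the aliasing tests below exercise the non-trivial element
// code paths.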
class NotTriviallyDestructible {
public:
NotTriviallyDestructible() : p_(new int(1)) {}
explicit NotTriviallyDestructible(int i) : p_(new int(i)) {}
NotTriviallyDestructible(const NotTriviallyDestructible& other)
: p_(new int(*other.p_)) {}
NotTriviallyDestructible& operator=(const NotTriviallyDestructible& other) {
p_ = absl::make_unique<int>(*other.p_);
return *this;
}
bool operator==(const NotTriviallyDestructible& other) const {
return *p_ == *other.p_;
}
private:
std::unique_ptr<int> p_;
};
TEST(AliasingTest, Emplace) {
for (size_t i = 2; i < 20; ++i) {
absl::InlinedVector<NotTriviallyDestructible, 10> vec;
for (size_t j = 0; j < i; ++j) {
vec.push_back(NotTriviallyDestructible(static_cast<int>(j)));
}
vec.emplace(vec.begin(), vec[0]);
EXPECT_EQ(vec[0], vec[1]);
vec.emplace(vec.begin() + i / 2, vec[i / 2]);
EXPECT_EQ(vec[i / 2], vec[i / 2 + 1]);
vec.emplace(vec.end() - 1, vec.back());
EXPECT_EQ(vec[vec.size() - 2], vec.back());
}
}
TEST(AliasingTest, InsertWithCount) {
for (size_t i = 1; i < 20; ++i) {
absl::InlinedVector<NotTriviallyDestructible, 10> vec;
for (size_t j = 0; j < i; ++j) {
vec.push_back(NotTriviallyDestructible(static_cast<int>(j)));
}
for (size_t n = 0; n < 5; ++n) {
vec.insert(vec.begin(), n, vec.back());
auto b = vec.begin();
EXPECT_TRUE(
std::all_of(b, b + n, [&vec](const NotTriviallyDestructible& x) {
return x == vec.back();
}));
auto m_idx = vec.size() / 2;
vec.insert(vec.begin() + m_idx, n, vec.back());
auto m = vec.begin() + m_idx;
EXPECT_TRUE(
std::all_of(m, m + n, [&vec](const NotTriviallyDestructible& x) {
return x == vec.back();
}));
auto old_e = vec.size() - 1;
auto val = vec[old_e];
vec.insert(vec.end(), n, vec[old_e]);
auto e = vec.begin() + old_e;
EXPECT_TRUE(std::all_of(
e, e + n,
[&val](const NotTriviallyDestructible& x) { return x == val; }));
}
}
}
TEST(OverheadTest, Storage) {
struct T {
void* val;
};
size_t expected_overhead = sizeof(T);
EXPECT_EQ((2 * expected_overhead),
sizeof(absl::InlinedVector<T, 1>) - sizeof(T[1]));
EXPECT_EQ(expected_overhead,
sizeof(absl::InlinedVector<T, 2>) - sizeof(T[2]));
EXPECT_EQ(expected_overhead,
sizeof(absl::InlinedVector<T, 3>) - sizeof(T[3]));
EXPECT_EQ(expected_overhead,
sizeof(absl::InlinedVector<T, 4>) - sizeof(T[4]));
EXPECT_EQ(expected_overhead,
sizeof(absl::InlinedVector<T, 5>) - sizeof(T[5]));
EXPECT_EQ(expected_overhead,
sizeof(absl::InlinedVector<T, 6>) - sizeof(T[6]));
EXPECT_EQ(expected_overhead,
sizeof(absl::InlinedVector<T, 7>) - sizeof(T[7]));
EXPECT_EQ(expected_overhead,
sizeof(absl::InlinedVector<T, 8>) - sizeof(T[8]));
}
TEST(IntVec, Clear) {
for (size_t len = 0; len < 20; len++) {
SCOPED_TRACE(len);
IntVec v;
Fill(&v, len);
v.clear();
EXPECT_EQ(0u, v.size());
EXPECT_EQ(v.begin(), v.end());
}
}
TEST(IntVec, Reserve) {
for (size_t len = 0; len < 20; len++) {
IntVec v;
Fill(&v, len);
for (size_t newlen = 0; newlen < 100; newlen++) {
const int* start_rep = v.data();
v.reserve(newlen);
const int* final_rep = v.data();
if (newlen <= len) {
EXPECT_EQ(start_rep, final_rep);
}
EXPECT_LE(newlen, v.capacity());
while (v.size() < newlen) {
v.push_back(0);
}
EXPECT_EQ(final_rep, v.data());
}
}
}
TEST(StringVec, SelfRefPushBack) {
std::vector<std::string> std_v;
absl::InlinedVector<std::string, 4> v;
const std::string s = "A quite long string to ensure heap.";
std_v.push_back(s);
v.push_back(s);
for (int i = 0; i < 20; ++i) {
EXPECT_THAT(v, ElementsAreArray(std_v));
v.push_back(v.back());
std_v.push_back(std_v.back());
}
EXPECT_THAT(v, ElementsAreArray(std_v));
}
TEST(StringVec, SelfRefPushBackWithMove) {
std::vector<std::string> std_v;
absl::InlinedVector<std::string, 4> v;
const std::string s = "A quite long string to ensure heap.";
std_v.push_back(s);
v.push_back(s);
for (int i = 0; i < 20; ++i) {
EXPECT_EQ(v.back(), std_v.back());
v.push_back(std::move(v.back()));
std_v.push_back(std::move(std_v.back()));
}
EXPECT_EQ(v.back(), std_v.back());
}
TEST(StringVec, SelfMove) {
const std::string s = "A quite long string to ensure heap.";
for (int len = 0; len < 20; len++) {
SCOPED_TRACE(len);
absl::InlinedVector<std::string, 8> v;
for (int i = 0; i < len; ++i) {
SCOPED_TRACE(i);
v.push_back(s);
}
v = std::move(*(&v));
std::vector<std::string> copy(v.begin(), v.end());
}
}
TEST(IntVec, Swap) {
for (size_t l1 = 0; l1 < 20; l1++) {
SCOPED_TRACE(l1);
for (size_t l2 = 0; l2 < 20; l2++) {
SCOPED_TRACE(l2);
IntVec a = Fill(l1, 0);
IntVec b = Fill(l2, 100);
{
using std::swap;
swap(a, b);
}
EXPECT_EQ(l1, b.size());
EXPECT_EQ(l2, a.size());
for (size_t i = 0; i < l1; i++) {
SCOPED_TRACE(i);
EXPECT_EQ(static_cast<int>(i), b[i]);
}
for (size_t i = 0; i < l2; i++) {
SCOPED_TRACE(i);
EXPECT_EQ(100 + static_cast<int>(i), a[i]);
}
}
}
}
TYPED_TEST_P(InstanceTest, Swap) {
using Instance = TypeParam;
using InstanceVec = absl::InlinedVector<Instance, 8>;
for (size_t l1 = 0; l1 < 20; l1++) {
SCOPED_TRACE(l1);
for (size_t l2 = 0; l2 < 20; l2++) {
SCOPED_TRACE(l2);
InstanceTracker tracker;
InstanceVec a, b;
const size_t inlined_capacity = a.capacity();
auto min_len = std::min(l1, l2);
auto max_len = std::max(l1, l2);
for (size_t i = 0; i < l1; i++)
a.push_back(Instance(static_cast<int>(i)));
for (size_t i = 0; i < l2; i++)
b.push_back(Instance(100 + static_cast<int>(i)));
EXPECT_EQ(tracker.instances(), static_cast<int>(l1 + l2));
tracker.ResetCopiesMovesSwaps();
{
using std::swap;
swap(a, b);
}
EXPECT_EQ(tracker.instances(), static_cast<int>(l1 + l2));
if (a.size() > inlined_capacity && b.size() > inlined_capacity) {
EXPECT_EQ(tracker.swaps(), 0);
EXPECT_EQ(tracker.moves(), 0);
} else if (a.size() <= inlined_capacity && b.size() <= inlined_capacity) {
EXPECT_EQ(tracker.swaps(), static_cast<int>(min_len));
EXPECT_EQ((tracker.moves() ? tracker.moves() : tracker.copies()),
static_cast<int>(max_len - min_len));
} else {
EXPECT_EQ(tracker.swaps(), 0);
EXPECT_EQ((tracker.moves() ? tracker.moves() : tracker.copies()),
static_cast<int>(min_len));
}
EXPECT_EQ(l1, b.size());
EXPECT_EQ(l2, a.size());
for (size_t i = 0; i < l1; i++) {
EXPECT_EQ(static_cast<int>(i), b[i].value());
}
for (size_t i = 0; i < l2; i++) {
EXPECT_EQ(100 + static_cast<int>(i), a[i].value());
}
}
}
}
TEST(IntVec, EqualAndNotEqual) {
IntVec a, b;
EXPECT_TRUE(a == b);
EXPECT_FALSE(a != b);
a.push_back(3);
EXPECT_FALSE(a == b);
EXPECT_TRUE(a != b);
b.push_back(3);
EXPECT_TRUE(a == b);
EXPECT_FALSE(a != b);
b.push_back(7);
EXPECT_FALSE(a == b);
EXPECT_TRUE(a != b);
a.push_back(6);
EXPECT_FALSE(a == b);
EXPECT_TRUE(a != b);
a.clear();
b.clear();
for (size_t i = 0; i < 100; i++) {
a.push_back(static_cast<int>(i));
b.push_back(static_cast<int>(i));
EXPECT_TRUE(a == b);
EXPECT_FALSE(a != b);
b[i] = b[i] + 1;
EXPECT_FALSE(a == b);
EXPECT_TRUE(a != b);
b[i] = b[i] - 1;
EXPECT_TRUE(a == b);
EXPECT_FALSE(a != b);
}
}
TEST(IntVec, RelationalOps) {
IntVec a, b;
EXPECT_FALSE(a < b);
EXPECT_FALSE(b < a);
EXPECT_FALSE(a > b);
EXPECT_FALSE(b > a);
EXPECT_TRUE(a <= b);
EXPECT_TRUE(b <= a);
EXPECT_TRUE(a >= b);
EXPECT_TRUE(b >= a);
b.push_back(3);
EXPECT_TRUE(a < b);
EXPECT_FALSE(b < a);
EXPECT_FALSE(a > b);
EXPECT_TRUE(b > a);
EXPECT_TRUE(a <= b);
EXPECT_FALSE(b <= a);
EXPECT_FALSE(a >= b);
EXPECT_TRUE(b >= a);
}
TYPED_TEST_P(InstanceTest, CountConstructorsDestructors) {
using Instance = TypeParam;
using InstanceVec = absl::InlinedVector<Instance, 8>;
InstanceTracker tracker;
for (size_t len = 0; len < 20; len++) {
SCOPED_TRACE(len);
tracker.ResetCopiesMovesSwaps();
InstanceVec v;
const size_t inlined_capacity = v.capacity();
for (size_t i = 0; i < len; i++) {
v.push_back(Instance(static_cast<int>(i)));
}
EXPECT_EQ(tracker.instances(), static_cast<int>(len));
EXPECT_GE(tracker.copies() + tracker.moves(),
static_cast<int>(len));
tracker.ResetCopiesMovesSwaps();
tracker.ResetCopiesMovesSwaps();
v.resize(len + 10, Instance(100));
EXPECT_EQ(tracker.instances(), static_cast<int>(len) + 10);
if (len <= inlined_capacity && len + 10 > inlined_capacity) {
EXPECT_EQ(tracker.copies() + tracker.moves(), 10 + static_cast<int>(len));
} else {
EXPECT_GE(tracker.copies() + tracker.moves(),
10);
}
tracker.ResetCopiesMovesSwaps();
v.resize(len, Instance(100));
EXPECT_EQ(tracker.instances(), static_cast<int>(len));
EXPECT_EQ(tracker.copies(), 0);
EXPECT_EQ(tracker.moves(), 0);
SCOPED_TRACE("reserve");
v.reserve(len + 1000);
EXPECT_EQ(tracker.instances(), static_cast<int>(len));
EXPECT_EQ(tracker.copies() + tracker.moves(), static_cast<int>(len));
if (len > 0) {
tracker.ResetCopiesMovesSwaps();
v.pop_back();
EXPECT_EQ(tracker.instances(), static_cast<int>(len) - 1);
EXPECT_EQ(tracker.copies(), 0);
EXPECT_EQ(tracker.moves(), 0);
if (!v.empty()) {
tracker.ResetCopiesMovesSwaps();
v.erase(v.begin());
EXPECT_EQ(tracker.instances(), static_cast<int>(len) - 2);
EXPECT_EQ(tracker.copies() + tracker.moves(),
static_cast<int>(len) - 2);
}
}
tracker.ResetCopiesMovesSwaps();
int instances_before_empty_erase = tracker.instances();
v.erase(v.begin(), v.begin());
EXPECT_EQ(tracker.instances(), instances_before_empty_erase);
EXPECT_EQ(tracker.copies() + tracker.moves(), 0);
}
}
TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnCopyConstruction) {
using Instance = TypeParam;
using InstanceVec = absl::InlinedVector<Instance, 8>;
InstanceTracker tracker;
for (int len = 0; len < 20; len++) {
SCOPED_TRACE(len);
tracker.ResetCopiesMovesSwaps();
InstanceVec v;
for (int i = 0; i < len; i++) {
v.push_back(Instance(i));
}
EXPECT_EQ(tracker.instances(), len);
EXPECT_GE(tracker.copies() + tracker.moves(),
len);
tracker.ResetCopiesMovesSwaps();
{
InstanceVec v_copy(v);
EXPECT_EQ(tracker.instances(), len + len);
EXPECT_EQ(tracker.copies(), len);
EXPECT_EQ(tracker.moves(), 0);
}
EXPECT_EQ(tracker.instances(), len);
}
}
TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnMoveConstruction) {
using Instance = TypeParam;
using InstanceVec = absl::InlinedVector<Instance, 8>;
InstanceTracker tracker;
for (int len = 0; len < 20; len++) {
SCOPED_TRACE(len);
tracker.ResetCopiesMovesSwaps();
InstanceVec v;
const size_t inlined_capacity = v.capacity();
for (int i = 0; i < len; i++) {
v.push_back(Instance(i));
}
EXPECT_EQ(tracker.instances(), len);
EXPECT_GE(tracker.copies() + tracker.moves(),
len);
tracker.ResetCopiesMovesSwaps();
{
InstanceVec v_copy(std::move(v));
if (static_cast<size_t>(len) > inlined_capacity) {
EXPECT_EQ(tracker.instances(), len);
EXPECT_EQ(tracker.live_instances(), len);
EXPECT_EQ(v.size(), 0u);
EXPECT_EQ(tracker.copies(), 0);
EXPECT_EQ(tracker.moves(), 0);
} else {
EXPECT_EQ(tracker.instances(), len + len);
if (Instance::supports_move()) {
EXPECT_EQ(tracker.live_instances(), len);
EXPECT_EQ(tracker.copies(), 0);
EXPECT_EQ(tracker.moves(), len);
} else {
EXPECT_EQ(tracker.live_instances(), len + len);
EXPECT_EQ(tracker.copies(), len);
EXPECT_EQ(tracker.moves(), 0);
}
}
EXPECT_EQ(tracker.swaps(), 0);
}
}
}
TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnAssignment) {
using Instance = TypeParam;
using InstanceVec = absl::InlinedVector<Instance, 8>;
InstanceTracker tracker;
for (int len = 0; len < 20; len++) {
SCOPED_TRACE(len);
for (int longorshort = 0; longorshort <= 1; ++longorshort) {
SCOPED_TRACE(longorshort);
tracker.ResetCopiesMovesSwaps();
InstanceVec longer, shorter;
for (int i = 0; i < len; i++) {
longer.push_back(Instance(i));
shorter.push_back(Instance(i));
}
longer.push_back(Instance(len));
EXPECT_EQ(tracker.instances(), len + len + 1);
      EXPECT_GE(tracker.copies() + tracker.moves(), len + len + 1);
tracker.ResetCopiesMovesSwaps();
if (longorshort) {
shorter = longer;
EXPECT_EQ(tracker.instances(), (len + 1) + (len + 1));
        EXPECT_GE(tracker.copies() + tracker.moves(), len + 1);
} else {
longer = shorter;
EXPECT_EQ(tracker.instances(), len + len);
EXPECT_EQ(tracker.copies() + tracker.moves(), len);
}
}
}
}
TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnMoveAssignment) {
using Instance = TypeParam;
using InstanceVec = absl::InlinedVector<Instance, 8>;
InstanceTracker tracker;
for (int len = 0; len < 20; len++) {
SCOPED_TRACE(len);
for (int longorshort = 0; longorshort <= 1; ++longorshort) {
SCOPED_TRACE(longorshort);
tracker.ResetCopiesMovesSwaps();
InstanceVec longer, shorter;
const size_t inlined_capacity = longer.capacity();
for (int i = 0; i < len; i++) {
longer.push_back(Instance(i));
shorter.push_back(Instance(i));
}
longer.push_back(Instance(len));
EXPECT_EQ(tracker.instances(), len + len + 1);
      EXPECT_GE(tracker.copies() + tracker.moves(), len + len + 1);
tracker.ResetCopiesMovesSwaps();
int src_len;
if (longorshort) {
src_len = len + 1;
shorter = std::move(longer);
} else {
src_len = len;
longer = std::move(shorter);
}
if (static_cast<size_t>(src_len) > inlined_capacity) {
EXPECT_EQ(tracker.instances(), src_len);
EXPECT_EQ(tracker.live_instances(), src_len);
EXPECT_EQ(tracker.copies(), 0);
EXPECT_EQ(tracker.moves(), 0);
} else {
EXPECT_EQ(tracker.instances(), src_len + src_len);
if (Instance::supports_move()) {
EXPECT_EQ(tracker.copies(), 0);
EXPECT_EQ(tracker.moves(), src_len);
EXPECT_EQ(tracker.live_instances(), src_len);
} else {
EXPECT_EQ(tracker.copies(), src_len);
EXPECT_EQ(tracker.moves(), 0);
EXPECT_EQ(tracker.live_instances(), src_len + src_len);
}
}
EXPECT_EQ(tracker.swaps(), 0);
}
}
}
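// assign() is exercised against targets that start out either inline or
// heap-allocated, with replacement sizes that do or do not fit the inline
// capacity.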
TEST(CountElemAssign, SimpleTypeWithInlineBacking) {
const size_t inlined_capacity = absl::InlinedVector<int, 2>().capacity();
for (size_t original_size = 0; original_size <= 5; ++original_size) {
SCOPED_TRACE(original_size);
std::vector<int> original_contents(original_size, 12345);
absl::InlinedVector<int, 2> v(original_contents.begin(),
original_contents.end());
v.assign(2, 123);
EXPECT_THAT(v, AllOf(SizeIs(2u), ElementsAre(123, 123)));
if (original_size <= inlined_capacity) {
EXPECT_EQ(v.capacity(), inlined_capacity);
}
}
}
TEST(CountElemAssign, SimpleTypeWithAllocation) {
for (size_t original_size = 0; original_size <= 5; ++original_size) {
SCOPED_TRACE(original_size);
std::vector<int> original_contents(original_size, 12345);
absl::InlinedVector<int, 2> v(original_contents.begin(),
original_contents.end());
v.assign(3, 123);
EXPECT_THAT(v, AllOf(SizeIs(3u), ElementsAre(123, 123, 123)));
EXPECT_LE(v.size(), v.capacity());
}
}
TYPED_TEST_P(InstanceTest, CountElemAssignInlineBacking) {
using Instance = TypeParam;
for (size_t original_size = 0; original_size <= 5; ++original_size) {
SCOPED_TRACE(original_size);
std::vector<Instance> original_contents(original_size, Instance(12345));
absl::InlinedVector<Instance, 2> v(original_contents.begin(),
original_contents.end());
v.assign(2, Instance(123));
EXPECT_THAT(v, AllOf(SizeIs(2u), ElementsAre(ValueIs(123), ValueIs(123))));
if (original_size <= 2) {
EXPECT_EQ(2u, v.capacity());
}
}
}
template <typename Instance>
void InstanceCountElemAssignWithAllocationTest() {
for (size_t original_size = 0; original_size <= 5; ++original_size) {
SCOPED_TRACE(original_size);
std::vector<Instance> original_contents(original_size, Instance(12345));
absl::InlinedVector<Instance, 2> v(original_contents.begin(),
original_contents.end());
v.assign(3, Instance(123));
EXPECT_THAT(v, AllOf(SizeIs(3u), ElementsAre(ValueIs(123), ValueIs(123),
ValueIs(123))));
EXPECT_LE(v.size(), v.capacity());
}
}
TEST(CountElemAssign, WithAllocationCopyableInstance) {
InstanceCountElemAssignWithAllocationTest<CopyableOnlyInstance>();
}
TEST(CountElemAssign, WithAllocationCopyableMovableInstance) {
InstanceCountElemAssignWithAllocationTest<CopyableMovableInstance>();
}
TEST(RangedConstructor, SimpleType) {
std::vector<int> source_v = {4, 5, 6};
absl::InlinedVector<int, 4> v(source_v.begin(), source_v.end());
EXPECT_EQ(3u, v.size());
  EXPECT_EQ(4u, v.capacity());
EXPECT_EQ(4, v[0]);
EXPECT_EQ(5, v[1]);
EXPECT_EQ(6, v[2]);
absl::InlinedVector<int, 2> realloc_v(source_v.begin(), source_v.end());
EXPECT_EQ(3u, realloc_v.size());
EXPECT_LT(2u, realloc_v.capacity());
EXPECT_EQ(4, realloc_v[0]);
EXPECT_EQ(5, realloc_v[1]);
EXPECT_EQ(6, realloc_v[2]);
}
template <typename Instance, typename SourceContainer, int inlined_capacity>
void InstanceRangedConstructorTestForContainer() {
InstanceTracker tracker;
SourceContainer source_v = {Instance(0), Instance(1)};
tracker.ResetCopiesMovesSwaps();
absl::InlinedVector<Instance, inlined_capacity> v(source_v.begin(),
source_v.end());
EXPECT_EQ(2u, v.size());
EXPECT_LT(1u, v.capacity());
EXPECT_EQ(0, v[0].value());
EXPECT_EQ(1, v[1].value());
EXPECT_EQ(tracker.copies(), 2);
EXPECT_EQ(tracker.moves(), 0);
}
template <typename Instance, int inlined_capacity>
void InstanceRangedConstructorTestWithCapacity() {
{
SCOPED_TRACE("std::list");
InstanceRangedConstructorTestForContainer<Instance, std::list<Instance>,
inlined_capacity>();
{
SCOPED_TRACE("const std::list");
InstanceRangedConstructorTestForContainer<
Instance, const std::list<Instance>, inlined_capacity>();
}
{
SCOPED_TRACE("std::vector");
InstanceRangedConstructorTestForContainer<Instance, std::vector<Instance>,
inlined_capacity>();
}
{
SCOPED_TRACE("const std::vector");
InstanceRangedConstructorTestForContainer<
Instance, const std::vector<Instance>, inlined_capacity>();
}
}
}
TYPED_TEST_P(InstanceTest, RangedConstructor) {
using Instance = TypeParam;
SCOPED_TRACE("capacity=1");
InstanceRangedConstructorTestWithCapacity<Instance, 1>();
SCOPED_TRACE("capacity=2");
InstanceRangedConstructorTestWithCapacity<Instance, 2>();
}
TEST(RangedConstructor, ElementsAreConstructed) {
std::vector<std::string> source_v = {"cat", "dog"};
absl::InlinedVector<std::string, 1> v(source_v.begin(), source_v.end());
EXPECT_EQ("cat", v[0]);
EXPECT_EQ("dog", v[1]);
}
TEST(RangedAssign, SimpleType) {
const size_t inlined_capacity = absl::InlinedVector<int, 3>().capacity();
for (size_t original_size = 0; original_size <= 5; ++original_size) {
SCOPED_TRACE(original_size);
std::vector<int> original_contents(original_size, 12345);
for (size_t target_size = 0; target_size <= 5; ++target_size) {
SCOPED_TRACE(target_size);
std::vector<int> new_contents;
for (size_t i = 0; i < target_size; ++i) {
new_contents.push_back(static_cast<int>(i + 3));
}
absl::InlinedVector<int, 3> v(original_contents.begin(),
original_contents.end());
v.assign(new_contents.begin(), new_contents.end());
EXPECT_EQ(new_contents.size(), v.size());
EXPECT_LE(new_contents.size(), v.capacity());
if (target_size <= inlined_capacity &&
original_size <= inlined_capacity) {
EXPECT_EQ(v.capacity(), inlined_capacity);
}
EXPECT_THAT(v, ElementsAreArray(new_contents));
}
}
}
template <typename Instance>
static bool InstanceValuesEqual(const Instance& lhs, const Instance& rhs) {
return lhs.value() == rhs.value();
}
template <typename Instance, typename SourceContainer>
void InstanceRangedAssignTestForContainer() {
for (size_t original_size = 0; original_size <= 5; ++original_size) {
SCOPED_TRACE(original_size);
std::vector<Instance> original_contents(original_size, Instance(12345));
for (size_t target_size = 0; target_size <= 5; ++target_size) {
SCOPED_TRACE(target_size);
std::vector<Instance> new_contents_in;
for (size_t i = 0; i < target_size; ++i) {
new_contents_in.push_back(Instance(static_cast<int>(i) + 3));
}
SourceContainer new_contents(new_contents_in.begin(),
new_contents_in.end());
absl::InlinedVector<Instance, 3> v(original_contents.begin(),
original_contents.end());
v.assign(new_contents.begin(), new_contents.end());
EXPECT_EQ(new_contents.size(), v.size());
EXPECT_LE(new_contents.size(), v.capacity());
if (target_size <= 3 && original_size <= 3) {
EXPECT_EQ(3u, v.capacity());
}
EXPECT_TRUE(std::equal(v.begin(), v.end(), new_contents.begin(),
InstanceValuesEqual<Instance>));
}
}
}
TYPED_TEST_P(InstanceTest, RangedAssign) {
using Instance = TypeParam;
SCOPED_TRACE("std::list");
InstanceRangedAssignTestForContainer<Instance, std::list<Instance>>();
SCOPED_TRACE("const std::list");
InstanceRangedAssignTestForContainer<Instance, const std::list<Instance>>();
SCOPED_TRACE("std::vector");
InstanceRangedAssignTestForContainer<Instance, std::vector<Instance>>();
SCOPED_TRACE("const std::vector");
InstanceRangedAssignTestForContainer<Instance, const std::vector<Instance>>();
}
TEST(InitializerListConstructor, SimpleTypeWithInlineBacking) {
EXPECT_THAT((absl::InlinedVector<int, 4>{4, 5, 6}),
AllOf(SizeIs(3u), CapacityIs(4u), ElementsAre(4, 5, 6)));
}
TEST(InitializerListConstructor, SimpleTypeWithReallocationRequired) {
EXPECT_THAT((absl::InlinedVector<int, 2>{4, 5, 6}),
AllOf(SizeIs(3u), CapacityIs(Gt(2u)), ElementsAre(4, 5, 6)));
}
TEST(InitializerListConstructor, DisparateTypesInList) {
EXPECT_THAT((absl::InlinedVector<int, 2>{-7, 8ULL}), ElementsAre(-7, 8));
EXPECT_THAT((absl::InlinedVector<std::string, 2>{"foo", std::string("bar")}),
ElementsAre("foo", "bar"));
}
TEST(InitializerListConstructor, ComplexTypeWithInlineBacking) {
const size_t inlined_capacity =
absl::InlinedVector<CopyableMovableInstance, 1>().capacity();
EXPECT_THAT(
(absl::InlinedVector<CopyableMovableInstance, 1>{
CopyableMovableInstance(0)}),
AllOf(SizeIs(1u), CapacityIs(inlined_capacity), ElementsAre(ValueIs(0))));
}
TEST(InitializerListConstructor, ComplexTypeWithReallocationRequired) {
EXPECT_THAT((absl::InlinedVector<CopyableMovableInstance, 1>{
CopyableMovableInstance(0), CopyableMovableInstance(1)}),
AllOf(SizeIs(2u), CapacityIs(Gt(1u)),
ElementsAre(ValueIs(0), ValueIs(1))));
}
TEST(InitializerListAssign, SimpleTypeFitsInlineBacking) {
for (size_t original_size = 0; original_size <= 4; ++original_size) {
SCOPED_TRACE(original_size);
absl::InlinedVector<int, 2> v1(original_size, 12345);
const size_t original_capacity_v1 = v1.capacity();
v1.assign({3});
EXPECT_THAT(v1, AllOf(SizeIs(1u), CapacityIs(original_capacity_v1),
ElementsAre(3)));
absl::InlinedVector<int, 2> v2(original_size, 12345);
const size_t original_capacity_v2 = v2.capacity();
v2 = {3};
EXPECT_THAT(v2, AllOf(SizeIs(1u), CapacityIs(original_capacity_v2),
ElementsAre(3)));
}
}
TEST(InitializerListAssign, SimpleTypeDoesNotFitInlineBacking) {
for (size_t original_size = 0; original_size <= 4; ++original_size) {
SCOPED_TRACE(original_size);
absl::InlinedVector<int, 2> v1(original_size, 12345);
v1.assign({3, 4, 5});
EXPECT_THAT(v1, AllOf(SizeIs(3u), ElementsAre(3, 4, 5)));
EXPECT_LE(3u, v1.capacity());
absl::InlinedVector<int, 2> v2(original_size, 12345);
v2 = {3, 4, 5};
EXPECT_THAT(v2, AllOf(SizeIs(3u), ElementsAre(3, 4, 5)));
EXPECT_LE(3u, v2.capacity());
}
}
TEST(InitializerListAssign, DisparateTypesInList) {
absl::InlinedVector<int, 2> v_int1;
v_int1.assign({-7, 8ULL});
EXPECT_THAT(v_int1, ElementsAre(-7, 8));
absl::InlinedVector<int, 2> v_int2;
v_int2 = {-7, 8ULL};
EXPECT_THAT(v_int2, ElementsAre(-7, 8));
absl::InlinedVector<std::string, 2> v_string1;
v_string1.assign({"foo", std::string("bar")});
EXPECT_THAT(v_string1, ElementsAre("foo", "bar"));
absl::InlinedVector<std::string, 2> v_string2;
v_string2 = {"foo", std::string("bar")};
EXPECT_THAT(v_string2, ElementsAre("foo", "bar"));
}
TYPED_TEST_P(InstanceTest, InitializerListAssign) {
using Instance = TypeParam;
for (size_t original_size = 0; original_size <= 4; ++original_size) {
SCOPED_TRACE(original_size);
absl::InlinedVector<Instance, 2> v(original_size, Instance(12345));
const size_t original_capacity = v.capacity();
v.assign({Instance(3)});
EXPECT_THAT(v, AllOf(SizeIs(1u), CapacityIs(original_capacity),
ElementsAre(ValueIs(3))));
}
for (size_t original_size = 0; original_size <= 4; ++original_size) {
SCOPED_TRACE(original_size);
absl::InlinedVector<Instance, 2> v(original_size, Instance(12345));
v.assign({Instance(3), Instance(4), Instance(5)});
EXPECT_THAT(
v, AllOf(SizeIs(3u), ElementsAre(ValueIs(3), ValueIs(4), ValueIs(5))));
EXPECT_LE(3u, v.capacity());
}
}
REGISTER_TYPED_TEST_SUITE_P(InstanceTest, Swap, CountConstructorsDestructors,
CountConstructorsDestructorsOnCopyConstruction,
CountConstructorsDestructorsOnMoveConstruction,
CountConstructorsDestructorsOnAssignment,
CountConstructorsDestructorsOnMoveAssignment,
CountElemAssignInlineBacking, RangedConstructor,
RangedAssign, InitializerListAssign);
using InstanceTypes =
::testing::Types<CopyableOnlyInstance, CopyableMovableInstance>;
INSTANTIATE_TYPED_TEST_SUITE_P(InstanceTestOnTypes, InstanceTest,
InstanceTypes);
TEST(DynamicVec, DynamicVecCompiles) {
DynamicVec v;
(void)v;
}
TEST(DynamicVec, CreateNonEmptyDynamicVec) {
DynamicVec v(1);
EXPECT_EQ(v.size(), 1u);
}
TEST(DynamicVec, EmplaceBack) {
DynamicVec v;
v.emplace_back(Dynamic{});
EXPECT_EQ(v.size(), 1u);
}
TEST(DynamicVec, EmplaceBackAfterHeapAllocation) {
DynamicVec v;
v.reserve(10);
v.emplace_back(Dynamic{});
EXPECT_EQ(v.size(), 1u);
}
TEST(DynamicVec, EmptyIteratorComparison) {
DynamicVec v;
EXPECT_EQ(v.begin(), v.end());
EXPECT_EQ(v.cbegin(), v.cend());
}
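// The AllocatorSupportTest cases use CountingAllocator to confirm that no
// bytes are allocated while the contents fit in the inline storage, and that
// all allocations are released once the vector shrinks back or is destroyed.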
TEST(AllocatorSupportTest, Constructors) {
using MyAlloc = CountingAllocator<int>;
using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
int64_t allocated = 0;
MyAlloc alloc(&allocated);
{ AllocVec ABSL_ATTRIBUTE_UNUSED v; }
{ AllocVec ABSL_ATTRIBUTE_UNUSED v(alloc); }
{ AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + ABSL_ARRAYSIZE(ia), alloc); }
{ AllocVec ABSL_ATTRIBUTE_UNUSED v({1, 2, 3}, alloc); }
AllocVec v2;
{ AllocVec ABSL_ATTRIBUTE_UNUSED v(v2, alloc); }
{ AllocVec ABSL_ATTRIBUTE_UNUSED v(std::move(v2), alloc); }
}
TEST(AllocatorSupportTest, CountAllocations) {
using MyAlloc = CountingAllocator<int>;
using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
int64_t allocated = 0;
MyAlloc alloc(&allocated);
{
AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + 4, alloc);
EXPECT_THAT(allocated, Eq(0));
}
EXPECT_THAT(allocated, Eq(0));
{
AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + ABSL_ARRAYSIZE(ia), alloc);
EXPECT_THAT(allocated, Eq(static_cast<int64_t>(v.size() * sizeof(int))));
}
EXPECT_THAT(allocated, Eq(0));
{
AllocVec v(4, 1, alloc);
EXPECT_THAT(allocated, Eq(0));
int64_t allocated2 = 0;
MyAlloc alloc2(&allocated2);
AllocVec v2(v, alloc2);
EXPECT_THAT(allocated2, Eq(0));
int64_t allocated3 = 0;
MyAlloc alloc3(&allocated3);
AllocVec v3(std::move(v), alloc3);
EXPECT_THAT(allocated3, Eq(0));
}
EXPECT_THAT(allocated, 0);
{
AllocVec v(8, 2, alloc);
EXPECT_THAT(allocated, Eq(static_cast<int64_t>(v.size() * sizeof(int))));
int64_t allocated2 = 0;
MyAlloc alloc2(&allocated2);
AllocVec v2(v, alloc2);
EXPECT_THAT(allocated2, Eq(static_cast<int64_t>(v2.size() * sizeof(int))));
int64_t allocated3 = 0;
MyAlloc alloc3(&allocated3);
AllocVec v3(std::move(v), alloc3);
EXPECT_THAT(allocated3, Eq(static_cast<int64_t>(v3.size() * sizeof(int))));
}
EXPECT_EQ(allocated, 0);
{
AllocVec v(8, 2, alloc);
EXPECT_EQ(allocated, static_cast<int64_t>(8 * sizeof(int)));
v.resize(5);
EXPECT_EQ(allocated, static_cast<int64_t>(8 * sizeof(int)));
v.shrink_to_fit();
EXPECT_EQ(allocated, static_cast<int64_t>(5 * sizeof(int)));
v.resize(4);
EXPECT_EQ(allocated, static_cast<int64_t>(5 * sizeof(int)));
v.shrink_to_fit();
EXPECT_EQ(allocated, 0);
}
}
TEST(AllocatorSupportTest, SwapBothAllocated) {
using MyAlloc = CountingAllocator<int>;
using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
int64_t allocated1 = 0;
int64_t allocated2 = 0;
{
const int ia1[] = {0, 1, 2, 3, 4, 5, 6, 7};
const int ia2[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
MyAlloc a1(&allocated1);
MyAlloc a2(&allocated2);
AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1);
AllocVec v2(ia2, ia2 + ABSL_ARRAYSIZE(ia2), a2);
EXPECT_LT(v1.capacity(), v2.capacity());
EXPECT_THAT(allocated1,
Eq(static_cast<int64_t>(v1.capacity() * sizeof(int))));
EXPECT_THAT(allocated2,
Eq(static_cast<int64_t>(v2.capacity() * sizeof(int))));
v1.swap(v2);
EXPECT_THAT(v1, ElementsAreArray(ia2));
EXPECT_THAT(v2, ElementsAreArray(ia1));
EXPECT_THAT(allocated1,
Eq(static_cast<int64_t>(v2.capacity() * sizeof(int))));
EXPECT_THAT(allocated2,
Eq(static_cast<int64_t>(v1.capacity() * sizeof(int))));
}
EXPECT_THAT(allocated1, 0);
EXPECT_THAT(allocated2, 0);
}
TEST(AllocatorSupportTest, SwapOneAllocated) {
using MyAlloc = CountingAllocator<int>;
using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
int64_t allocated1 = 0;
int64_t allocated2 = 0;
{
const int ia1[] = {0, 1, 2, 3, 4, 5, 6, 7};
const int ia2[] = {0, 1, 2, 3};
MyAlloc a1(&allocated1);
MyAlloc a2(&allocated2);
AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1);
AllocVec v2(ia2, ia2 + ABSL_ARRAYSIZE(ia2), a2);
EXPECT_THAT(allocated1,
Eq(static_cast<int64_t>(v1.capacity() * sizeof(int))));
EXPECT_THAT(allocated2, Eq(0));
v1.swap(v2);
EXPECT_THAT(v1, ElementsAreArray(ia2));
EXPECT_THAT(v2, ElementsAreArray(ia1));
EXPECT_THAT(allocated1,
Eq(static_cast<int64_t>(v2.capacity() * sizeof(int))));
EXPECT_THAT(allocated2, Eq(0));
EXPECT_TRUE(v2.get_allocator() == a1);
EXPECT_TRUE(v1.get_allocator() == a2);
}
EXPECT_THAT(allocated1, 0);
EXPECT_THAT(allocated2, 0);
}
TEST(AllocatorSupportTest, ScopedAllocatorWorksInlined) {
using StdVector = std::vector<int, CountingAllocator<int>>;
using Alloc = CountingAllocator<StdVector>;
using ScopedAlloc = std::scoped_allocator_adaptor<Alloc>;
using AllocVec = absl::InlinedVector<StdVector, 1, ScopedAlloc>;
int64_t total_allocated_byte_count = 0;
AllocVec inlined_case(ScopedAlloc(Alloc(+&total_allocated_byte_count)));
inlined_case.emplace_back();
int64_t absl_responsible_for_count = total_allocated_byte_count;
#if !defined(_MSC_VER)
EXPECT_EQ(absl_responsible_for_count, 0);
#endif
inlined_case[0].emplace_back();
EXPECT_GT(total_allocated_byte_count, absl_responsible_for_count);
inlined_case.clear();
inlined_case.shrink_to_fit();
EXPECT_EQ(total_allocated_byte_count, 0);
}
TEST(AllocatorSupportTest, ScopedAllocatorWorksAllocated) {
using StdVector = std::vector<int, CountingAllocator<int>>;
using Alloc = CountingAllocator<StdVector>;
using ScopedAlloc = std::scoped_allocator_adaptor<Alloc>;
using AllocVec = absl::InlinedVector<StdVector, 1, ScopedAlloc>;
int64_t total_allocated_byte_count = 0;
AllocVec allocated_case(ScopedAlloc(Alloc(+&total_allocated_byte_count)));
allocated_case.emplace_back();
allocated_case.emplace_back();
int64_t absl_responsible_for_count = total_allocated_byte_count;
EXPECT_GT(absl_responsible_for_count, 0);
allocated_case[1].emplace_back();
EXPECT_GT(total_allocated_byte_count, absl_responsible_for_count);
allocated_case.clear();
allocated_case.shrink_to_fit();
EXPECT_EQ(total_allocated_byte_count, 0);
}
TEST(AllocatorSupportTest, SizeAllocConstructor) {
constexpr size_t inlined_size = 4;
using Alloc = CountingAllocator<int>;
using AllocVec = absl::InlinedVector<int, inlined_size, Alloc>;
{
auto len = inlined_size / 2;
int64_t allocated = 0;
auto v = AllocVec(len, Alloc(&allocated));
EXPECT_THAT(allocated, Eq(0));
EXPECT_THAT(v, AllOf(SizeIs(len), Each(0)));
}
{
auto len = inlined_size * 2;
int64_t allocated = 0;
auto v = AllocVec(len, Alloc(&allocated));
EXPECT_THAT(allocated, Eq(static_cast<int64_t>(len * sizeof(int))));
EXPECT_THAT(v, AllOf(SizeIs(len), Each(0)));
}
}
TEST(InlinedVectorTest, MinimumAllocatorCompilesUsingTraits) {
using T = int;
using A = std::allocator<T>;
using ATraits = absl::allocator_traits<A>;
struct MinimumAllocator {
using value_type = T;
value_type* allocate(size_t n) {
A a;
return ATraits::allocate(a, n);
}
void deallocate(value_type* p, size_t n) {
A a;
ATraits::deallocate(a, p, n);
}
};
absl::InlinedVector<T, 1, MinimumAllocator> vec;
vec.emplace_back();
vec.resize(0);
}
TEST(InlinedVectorTest, AbslHashValueWorks) {
using V = absl::InlinedVector<int, 4>;
std::vector<V> cases;
for (size_t i = 0; i < 10; ++i) {
V v;
for (int j = 0; j < static_cast<int>(i); ++j) {
v.push_back(j);
}
cases.push_back(v);
v.resize(i % 4);
cases.push_back(v);
}
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases));
}
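// Move assignment must still work for element types that are move
// constructible but not move assignable; the tests below check that elements
// are transferred by taking over heap storage or by move construction.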
class MoveConstructibleOnlyInstance
: public absl::test_internal::BaseCountedInstance {
public:
explicit MoveConstructibleOnlyInstance(int x) : BaseCountedInstance(x) {}
MoveConstructibleOnlyInstance(MoveConstructibleOnlyInstance&& other) =
default;
MoveConstructibleOnlyInstance& operator=(
MoveConstructibleOnlyInstance&& other) = delete;
};
MATCHER(HasValue, "") {
return ::testing::get<0>(arg).value() == ::testing::get<1>(arg);
}
TEST(NonAssignableMoveAssignmentTest, AllocatedToInline) {
using X = MoveConstructibleOnlyInstance;
InstanceTracker tracker;
absl::InlinedVector<X, 2> inlined;
inlined.emplace_back(1);
absl::InlinedVector<X, 2> allocated;
allocated.emplace_back(1);
allocated.emplace_back(2);
allocated.emplace_back(3);
tracker.ResetCopiesMovesSwaps();
inlined = std::move(allocated);
EXPECT_EQ(tracker.moves(), 0);
EXPECT_EQ(tracker.live_instances(), 3);
EXPECT_THAT(inlined, Pointwise(HasValue(), {1, 2, 3}));
}
TEST(NonAssignableMoveAssignmentTest, InlineToAllocated) {
using X = MoveConstructibleOnlyInstance;
InstanceTracker tracker;
absl::InlinedVector<X, 2> inlined;
inlined.emplace_back(1);
absl::InlinedVector<X, 2> allocated;
allocated.emplace_back(1);
allocated.emplace_back(2);
allocated.emplace_back(3);
tracker.ResetCopiesMovesSwaps();
allocated = std::move(inlined);
EXPECT_EQ(tracker.moves(), 1);
EXPECT_EQ(tracker.live_instances(), 1);
EXPECT_THAT(allocated, Pointwise(HasValue(), {1}));
}
TEST(NonAssignableMoveAssignmentTest, InlineToInline) {
using X = MoveConstructibleOnlyInstance;
InstanceTracker tracker;
absl::InlinedVector<X, 2> inlined_a;
inlined_a.emplace_back(1);
absl::InlinedVector<X, 2> inlined_b;
inlined_b.emplace_back(1);
tracker.ResetCopiesMovesSwaps();
inlined_a = std::move(inlined_b);
EXPECT_EQ(tracker.moves(), 1);
EXPECT_EQ(tracker.live_instances(), 1);
EXPECT_THAT(inlined_a, Pointwise(HasValue(), {1}));
}
TEST(NonAssignableMoveAssignmentTest, AllocatedToAllocated) {
using X = MoveConstructibleOnlyInstance;
InstanceTracker tracker;
absl::InlinedVector<X, 2> allocated_a;
allocated_a.emplace_back(1);
allocated_a.emplace_back(2);
allocated_a.emplace_back(3);
absl::InlinedVector<X, 2> allocated_b;
allocated_b.emplace_back(4);
allocated_b.emplace_back(5);
allocated_b.emplace_back(6);
allocated_b.emplace_back(7);
tracker.ResetCopiesMovesSwaps();
allocated_a = std::move(allocated_b);
EXPECT_EQ(tracker.moves(), 0);
EXPECT_EQ(tracker.live_instances(), 4);
EXPECT_THAT(allocated_a, Pointwise(HasValue(), {4, 5, 6, 7}));
}
TEST(NonAssignableMoveAssignmentTest, AssignThis) {
using X = MoveConstructibleOnlyInstance;
InstanceTracker tracker;
absl::InlinedVector<X, 2> v;
v.emplace_back(1);
v.emplace_back(2);
v.emplace_back(3);
tracker.ResetCopiesMovesSwaps();
v = std::move(*std::addressof(v));
EXPECT_EQ(tracker.moves(), 0);
EXPECT_EQ(tracker.live_instances(), 3);
EXPECT_THAT(v, Pointwise(HasValue(), {1, 2, 3}));
}
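// swap() must also work when the element type has no usable swap overload;
// these tests check how many element moves that takes for inline and
// heap-allocated operands.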
class NonSwappableInstance : public absl::test_internal::BaseCountedInstance {
public:
explicit NonSwappableInstance(int x) : BaseCountedInstance(x) {}
NonSwappableInstance(const NonSwappableInstance& other) = default;
NonSwappableInstance& operator=(const NonSwappableInstance& other) = default;
NonSwappableInstance(NonSwappableInstance&& other) = default;
NonSwappableInstance& operator=(NonSwappableInstance&& other) = default;
};
void swap(NonSwappableInstance&, NonSwappableInstance&) = delete;
TEST(NonSwappableSwapTest, InlineAndAllocatedTransferStorageAndMove) {
using X = NonSwappableInstance;
InstanceTracker tracker;
absl::InlinedVector<X, 2> inlined;
inlined.emplace_back(1);
absl::InlinedVector<X, 2> allocated;
allocated.emplace_back(1);
allocated.emplace_back(2);
allocated.emplace_back(3);
tracker.ResetCopiesMovesSwaps();
inlined.swap(allocated);
EXPECT_EQ(tracker.moves(), 1);
EXPECT_EQ(tracker.live_instances(), 4);
EXPECT_THAT(inlined, Pointwise(HasValue(), {1, 2, 3}));
}
TEST(NonSwappableSwapTest, InlineAndInlineMoveIndividualElements) {
using X = NonSwappableInstance;
InstanceTracker tracker;
absl::InlinedVector<X, 2> inlined_a;
inlined_a.emplace_back(1);
absl::InlinedVector<X, 2> inlined_b;
inlined_b.emplace_back(2);
tracker.ResetCopiesMovesSwaps();
inlined_a.swap(inlined_b);
EXPECT_EQ(tracker.moves(), 3);
EXPECT_EQ(tracker.live_instances(), 2);
EXPECT_THAT(inlined_a, Pointwise(HasValue(), {2}));
EXPECT_THAT(inlined_b, Pointwise(HasValue(), {1}));
}
TEST(NonSwappableSwapTest, AllocatedAndAllocatedOnlyTransferStorage) {
using X = NonSwappableInstance;
InstanceTracker tracker;
absl::InlinedVector<X, 2> allocated_a;
allocated_a.emplace_back(1);
allocated_a.emplace_back(2);
allocated_a.emplace_back(3);
absl::InlinedVector<X, 2> allocated_b;
allocated_b.emplace_back(4);
allocated_b.emplace_back(5);
allocated_b.emplace_back(6);
allocated_b.emplace_back(7);
tracker.ResetCopiesMovesSwaps();
allocated_a.swap(allocated_b);
EXPECT_EQ(tracker.moves(), 0);
EXPECT_EQ(tracker.live_instances(), 7);
EXPECT_THAT(allocated_a, Pointwise(HasValue(), {4, 5, 6, 7}));
EXPECT_THAT(allocated_b, Pointwise(HasValue(), {1, 2, 3}));
}
TEST(NonSwappableSwapTest, SwapThis) {
using X = NonSwappableInstance;
InstanceTracker tracker;
absl::InlinedVector<X, 2> v;
v.emplace_back(1);
v.emplace_back(2);
v.emplace_back(3);
tracker.ResetCopiesMovesSwaps();
v.swap(v);
EXPECT_EQ(tracker.moves(), 0);
EXPECT_EQ(tracker.live_instances(), 3);
EXPECT_THAT(v, Pointwise(HasValue(), {1, 2, 3}));
}
template <size_t N>
using CharVec = absl::InlinedVector<char, N>;
template <typename T>
struct MySpan {
T* data;
size_t size;
};
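// For small element types the inline capacity is rounded up so the inline
// storage is at least as large as the pointer/size pair that a heap-allocated
// representation would occupy anyway.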
TEST(StorageTest, InlinedCapacityAutoIncrease) {
EXPECT_GT(CharVec<1>().capacity(), 1);
EXPECT_EQ(CharVec<1>().capacity(), sizeof(MySpan<char>));
EXPECT_EQ(CharVec<1>().capacity(), CharVec<2>().capacity());
EXPECT_EQ(sizeof(CharVec<1>), sizeof(CharVec<2>));
EXPECT_GT((absl::InlinedVector<int, 1>().capacity()), 1);
EXPECT_EQ((absl::InlinedVector<int, 1>().capacity()),
sizeof(MySpan<int>) / sizeof(int));
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/internal/inlined_vector.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/inlined_vector_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
f090185f-76b0-4a65-9cb4-8dd42979e2c4 | cpp | tensorflow/tensorflow | operator | tensorflow/lite/toco/tflite/operator.cc | tensorflow/lite/toco/tflite/operator_test.cc | #include "tensorflow/lite/toco/tflite/operator.h"
#include <map>
#include <memory>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/compiler/mlir/lite/delegates/flex/allowlisted_flex_ops.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/toco/graph_transformations/lstm_utils.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/runtime/types.h"
#include "tensorflow/lite/toco/tflite/builtin_operator.h"
#include "tensorflow/lite/toco/tflite/custom_operator.h"
#include "tensorflow/lite/toco/tflite/simple_operator.h"
#include "tensorflow/lite/toco/tflite/types.h"
#include "tensorflow/lite/toco/toco_types.h"
#include "tensorflow/lite/tools/versioning/op_signature.h"
#include "tensorflow/lite/tools/versioning/op_version.h"
namespace toco {
namespace tflite {
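// This translation unit defines, for each toco operator, how its options are
// written to and read from the corresponding TFLite flatbuffer builtin
// options table, and how the minimum required operator version is computed.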
TfLiteType GetTensorType(const ArrayDataType type) {
const std::map<ArrayDataType, TfLiteType> tensor_type_map = {
{ArrayDataType::kBool, kTfLiteBool},
{ArrayDataType::kFloat, kTfLiteFloat32},
{ArrayDataType::kInt8, kTfLiteInt8},
{ArrayDataType::kUint8, kTfLiteUInt8},
{ArrayDataType::kInt16, kTfLiteInt16},
{ArrayDataType::kUint16, kTfLiteUInt16},
{ArrayDataType::kInt32, kTfLiteInt32},
{ArrayDataType::kUint32, kTfLiteUInt32},
{ArrayDataType::kInt64, kTfLiteInt64},
{ArrayDataType::kUint64, kTfLiteUInt64},
{ArrayDataType::kString, kTfLiteString},
{ArrayDataType::kComplex64, kTfLiteComplex64},
{ArrayDataType::kComplex128, kTfLiteComplex128},
{ArrayDataType::kFloat16, kTfLiteFloat16},
{ArrayDataType::kFloat64, kTfLiteFloat64}};
auto it = tensor_type_map.find(type);
if (it != tensor_type_map.end()) {
return it->second;
}
return kTfLiteNoType;
}
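// Builds a ::tflite::OpSignature (builtin op plus input/output tensor types
// and shapes) from a toco OperatorSignature, so that the shared TFLite
// versioning logic can pick the minimum operator version.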
::tflite::OpSignature GetVersioningOpSig(
const ::tflite::BuiltinOperator op, const OperatorSignature& op_signature) {
std::vector<::tflite::OpSignatureTensorSpec> inputs, outputs;
for (const auto& input_name : op_signature.op->inputs) {
::tflite::OpSignatureTensorSpec tensor = {kTfLiteNoType};
if (op_signature.model->HasArray(input_name)) {
const Array& input_array = op_signature.model->GetArray(input_name);
tensor.type = GetTensorType(input_array.data_type);
if (input_array.has_shape()) {
tensor.dims = input_array.shape().dims();
}
}
inputs.push_back(tensor);
}
for (const auto& output_name : op_signature.op->outputs) {
::tflite::OpSignatureTensorSpec tensor = {kTfLiteNoType};
if (op_signature.model->HasArray(output_name)) {
const Array& output_array = op_signature.model->GetArray(output_name);
tensor.type = GetTensorType(output_array.data_type);
if (output_array.has_shape()) {
tensor.dims = output_array.shape().dims();
}
}
outputs.push_back(tensor);
}
return ::tflite::OpSignature{op, inputs, outputs};
}
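// Each class below binds one toco operator to its TFLite builtin options
// table: WriteOptions() serializes the toco fields into the flatbuffer,
// ReadOptions() restores them, and GetVersion() (where overridden) derives
// the operator version from the op signature.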
class AveragePool
: public BuiltinOperator<AveragePoolOperator, ::tflite::Pool2DOptions,
::tflite::BuiltinOptions_Pool2DOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto padding = Padding::Serialize(op.padding.type);
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreatePool2DOptions(*builder, padding, op.stride_width,
op.stride_height, op.kwidth,
op.kheight, activation_function);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->padding.type = Padding::Deserialize(options.padding());
op->stride_width = options.stride_w();
op->stride_height = options.stride_h();
op->kwidth = options.filter_width();
op->kheight = options.filter_height();
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
}
};
class Convolution
: public BuiltinOperator<ConvOperator, ::tflite::Conv2DOptions,
::tflite::BuiltinOptions_Conv2DOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto padding = Padding::Serialize(op.padding.type);
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreateConv2DOptions(*builder, padding, op.stride_width,
op.stride_height, activation_function,
op.dilation_width_factor,
op.dilation_height_factor);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->padding.type = Padding::Deserialize(options.padding());
op->stride_width = options.stride_w();
op->stride_height = options.stride_h();
op->dilation_width_factor = options.dilation_w_factor();
op->dilation_height_factor = options.dilation_h_factor();
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
}
};
class DepthwiseConvolution
: public BuiltinOperator<DepthwiseConvOperator,
::tflite::DepthwiseConv2DOptions,
::tflite::BuiltinOptions_DepthwiseConv2DOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto padding = Padding::Serialize(op.padding.type);
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreateDepthwiseConv2DOptions(
*builder, padding, op.stride_width, op.stride_height,
op.depth_multiplier, activation_function, op.dilation_width_factor,
op.dilation_height_factor);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->padding.type = Padding::Deserialize(options.padding());
op->stride_width = options.stride_w();
op->stride_height = options.stride_h();
op->depth_multiplier = options.depth_multiplier();
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
op->dilation_width_factor = options.dilation_w_factor();
op->dilation_height_factor = options.dilation_h_factor();
}
int GetVersion(const OperatorSignature& op_signature) const override {
const auto& conv_op =
static_cast<const DepthwiseConvOperator&>(*op_signature.op);
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
TfLiteDepthwiseConvParams depthwise_conv_params = {};
depthwise_conv_params.dilation_width_factor = conv_op.dilation_width_factor;
depthwise_conv_params.dilation_height_factor =
conv_op.dilation_height_factor;
op_sig.builtin_data = reinterpret_cast<void*>(&depthwise_conv_params);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class Add : public BuiltinOperator<AddOperator, ::tflite::AddOptions,
::tflite::BuiltinOptions_AddOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreateAddOptions(*builder, activation_function);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
}
};
class AddN : public BuiltinOperator<AddNOperator, ::tflite::AddNOptions,
::tflite::BuiltinOptions_AddNOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateAddNOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
class SpaceToBatchND
: public BuiltinOperator<SpaceToBatchNDOperator,
::tflite::SpaceToBatchNDOptions,
::tflite::BuiltinOptions_SpaceToBatchNDOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateSpaceToBatchNDOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
int GetVersion(const OperatorSignature& op_signature) const override {
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class Sub : public BuiltinOperator<SubOperator, ::tflite::SubOptions,
::tflite::BuiltinOptions_SubOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreateSubOptions(*builder, activation_function);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
}
int GetVersion(const OperatorSignature& op_signature) const override {
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class Div : public BuiltinOperator<DivOperator, ::tflite::DivOptions,
::tflite::BuiltinOptions_DivOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreateDivOptions(*builder, activation_function);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
}
int GetVersion(const OperatorSignature& op_signature) const override {
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class BatchToSpaceND
: public BuiltinOperator<BatchToSpaceNDOperator,
::tflite::BatchToSpaceNDOptions,
::tflite::BuiltinOptions_BatchToSpaceNDOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateBatchToSpaceNDOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
int GetVersion(const OperatorSignature& op_signature) const override {
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class Cast : public BuiltinOperator<CastOperator, ::tflite::CastOptions,
::tflite::BuiltinOptions_CastOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateCastOptions(*builder,
DataType::Serialize(op.src_data_type),
DataType::Serialize(op.dst_data_type));
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->src_data_type = DataType::Deserialize(options.in_data_type());
op->dst_data_type = DataType::Deserialize(options.out_data_type());
}
};
class Concatenation
: public BuiltinOperator<ConcatenationOperator,
::tflite::ConcatenationOptions,
::tflite::BuiltinOptions_ConcatenationOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateConcatenationOptions(*builder, op.axis);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->axis = options.axis();
}
};
class DepthToSpace
: public BuiltinOperator<DepthToSpaceOperator,
::tflite::DepthToSpaceOptions,
::tflite::BuiltinOptions_DepthToSpaceOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateDepthToSpaceOptions(*builder, op.block_size);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->block_size = options.block_size();
}
};
class FakeQuant
: public BuiltinOperator<FakeQuantOperator, ::tflite::FakeQuantOptions,
::tflite::BuiltinOptions_FakeQuantOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateFakeQuantOptions(
*builder, op.minmax->min, op.minmax->max, op.num_bits, op.narrow_range);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
auto* minmax = new MinMax;
minmax->min = options.min();
minmax->max = options.max();
op->minmax.reset(minmax);
op->num_bits = options.num_bits();
op->narrow_range = options.narrow_range();
}
int GetVersion(const OperatorSignature& op_signature) const override {
const auto& fq_op = static_cast<const FakeQuantOperator&>(*op_signature.op);
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
TfLiteFakeQuantParams fake_quant_params = {};
fake_quant_params.narrow_range = fq_op.narrow_range;
op_sig.builtin_data = reinterpret_cast<void*>(&fake_quant_params);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class FullyConnected
: public BuiltinOperator<FullyConnectedOperator,
::tflite::FullyConnectedOptions,
::tflite::BuiltinOptions_FullyConnectedOptions> {
public:
using BuiltinOperator::BuiltinOperator;
::tflite::FullyConnectedOptionsWeightsFormat GetWeightFormat(
FullyConnectedWeightsFormat fmt) const {
switch (fmt) {
case FullyConnectedWeightsFormat::kDefault:
return ::tflite::FullyConnectedOptionsWeightsFormat_DEFAULT;
case FullyConnectedWeightsFormat::kShuffled4x16Int8:
return ::tflite::FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8;
default:
LOG(ERROR) << "Unhandled FC weights format";
return ::tflite::FullyConnectedOptionsWeightsFormat_DEFAULT;
}
}
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreateFullyConnectedOptions(
*builder, activation_function, GetWeightFormat(op.weights_format));
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
switch (options.weights_format()) {
case ::tflite::FullyConnectedOptionsWeightsFormat_DEFAULT:
op->weights_format = FullyConnectedWeightsFormat::kDefault;
break;
case ::tflite::FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
op->weights_format = FullyConnectedWeightsFormat::kShuffled4x16Int8;
break;
default:
LOG(ERROR) << "Unhandled FC weights format";
op->weights_format = FullyConnectedWeightsFormat::kDefault;
}
}
int GetVersion(const OperatorSignature& op_signature) const override {
const auto& fc_op =
static_cast<const FullyConnectedOperator&>(*op_signature.op);
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
TfLiteFullyConnectedParams fully_connected_params = {};
fully_connected_params.keep_num_dims = fc_op.keep_num_dims;
fully_connected_params.weights_format =
static_cast<TfLiteFullyConnectedWeightsFormat>(
GetWeightFormat(fc_op.weights_format));
op_sig.builtin_data = reinterpret_cast<void*>(&fully_connected_params);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class Gather : public BuiltinOperator<GatherOperator, ::tflite::GatherOptions,
::tflite::BuiltinOptions_GatherOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
int axis = op.axis ? op.axis.value() : 0;
return ::tflite::CreateGatherOptions(*builder, axis);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->axis = {options.axis()};
}
};
class GatherNd
: public BuiltinOperator<GatherNdOperator, ::tflite::GatherNdOptions,
::tflite::BuiltinOptions_GatherNdOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateGatherNdOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
class Svdf : public BuiltinOperator<SvdfOperator, ::tflite::SVDFOptions,
::tflite::BuiltinOptions_SVDFOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreateSVDFOptions(*builder, op.rank, activation_function);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
op->rank = options.rank();
}
};
class L2Normalization
: public BuiltinOperator<L2NormalizationOperator, ::tflite::L2NormOptions,
::tflite::BuiltinOptions_L2NormOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreateL2NormOptions(*builder, activation_function);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
}
};
class L2Pool : public BuiltinOperator<L2PoolOperator, ::tflite::Pool2DOptions,
::tflite::BuiltinOptions_Pool2DOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto padding = Padding::Serialize(op.padding.type);
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreatePool2DOptions(*builder, padding, op.stride_width,
op.stride_height, op.kwidth,
op.kheight, activation_function);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->padding.type = Padding::Deserialize(options.padding());
op->stride_width = options.stride_w();
op->stride_height = options.stride_h();
op->kwidth = options.filter_width();
op->kheight = options.filter_height();
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
}
};
class LocalResponseNormalization
: public BuiltinOperator<
LocalResponseNormalizationOperator,
::tflite::LocalResponseNormalizationOptions,
::tflite::BuiltinOptions_LocalResponseNormalizationOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateLocalResponseNormalizationOptions(
*builder, op.range, op.bias, op.alpha, op.beta);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->range = options.radius();
op->bias = options.bias();
op->alpha = options.alpha();
op->beta = options.beta();
}
};
class MaxPool : public BuiltinOperator<MaxPoolOperator, ::tflite::Pool2DOptions,
::tflite::BuiltinOptions_Pool2DOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto padding = Padding::Serialize(op.padding.type);
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreatePool2DOptions(*builder, padding, op.stride_width,
op.stride_height, op.kwidth,
op.kheight, activation_function);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->padding.type = Padding::Deserialize(options.padding());
op->stride_width = options.stride_w();
op->stride_height = options.stride_h();
op->kwidth = options.filter_width();
op->kheight = options.filter_height();
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
}
};
class Mul : public BuiltinOperator<MulOperator, ::tflite::MulOptions,
::tflite::BuiltinOptions_MulOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto activation_function =
ActivationFunction::Serialize(op.fused_activation_function);
return ::tflite::CreateMulOptions(*builder, activation_function);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->fused_activation_function =
ActivationFunction::Deserialize(options.fused_activation_function());
}
int GetVersion(const OperatorSignature& op_signature) const override {
const std::string& input1_name = op_signature.op->inputs[0];
const std::string& input2_name = op_signature.op->inputs[1];
const std::string& output_name = op_signature.op->outputs[0];
const Array& input1_array = op_signature.model->GetArray(input1_name);
const Array& input2_array = op_signature.model->GetArray(input2_name);
const Array& output_array = op_signature.model->GetArray(output_name);
const auto& input1_quant = input1_array.quantization_params;
const auto& input2_quant = input2_array.quantization_params;
const auto& output_quant = output_array.quantization_params;
const float input1_scale = input1_quant ? input1_quant->scale : 0.0f;
const float input2_scale = input2_quant ? input2_quant->scale : 0.0f;
const float output_scale = output_quant ? output_quant->scale : 0.0f;
const bool input_quantized = input1_quant || input2_quant;
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
op_sig.ext_options.mul.input1_scale = input1_scale;
op_sig.ext_options.mul.input2_scale = input2_scale;
op_sig.ext_options.mul.output_scale = output_scale;
op_sig.ext_options.mul.input_quantized = input_quantized;
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class Pad : public BuiltinOperator<PadOperator, ::tflite::PadOptions,
::tflite::BuiltinOptions_PadOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreatePadOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
class Tile
: public BuiltinOperator<TensorFlowTileOperator, ::tflite::TileOptions,
::tflite::BuiltinOptions_TileOptions> {
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateTileOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
class PadV2 : public BuiltinOperator<PadV2Operator, ::tflite::PadV2Options,
::tflite::BuiltinOptions_PadV2Options> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreatePadV2Options(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
class Reshape
: public BuiltinOperator<TensorFlowReshapeOperator,
::tflite::ReshapeOptions,
::tflite::BuiltinOptions_ReshapeOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateReshapeOptions(*builder,
builder->CreateVector(op.shape));
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->shape.insert(op->shape.end(), options.new_shape()->begin(),
options.new_shape()->end());
}
};
class Softmax
: public BuiltinOperator<SoftmaxOperator, ::tflite::SoftmaxOptions,
::tflite::BuiltinOptions_SoftmaxOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateSoftmaxOptions(*builder, op.beta);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->beta = options.beta();
}
};
class SpaceToDepth
: public BuiltinOperator<SpaceToDepthOperator,
::tflite::SpaceToDepthOptions,
::tflite::BuiltinOptions_SpaceToDepthOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateSpaceToDepthOptions(*builder, op.block_size);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->block_size = options.block_size();
}
};
class Transpose
: public BuiltinOperator<TransposeOperator, ::tflite::TransposeOptions,
::tflite::BuiltinOptions_TransposeOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateTransposeOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
class Lstm : public BuiltinOperator<LstmCellOperator, ::tflite::LSTMOptions,
::tflite::BuiltinOptions_LSTMOptions> {
public:
using BuiltinOperator::BuiltinOperator;
::tflite::LSTMKernelType GetKernelType(
LstmCellOperator::KernelType type) const {
switch (type) {
      case LstmCellOperator::KERNEL_BASIC:
        return ::tflite::LSTMKernelType_BASIC;
      case LstmCellOperator::KERNEL_FULL:
        return ::tflite::LSTMKernelType_FULL;
default:
LOG(ERROR) << "Unhandled Kernel Type";
return static_cast<::tflite::LSTMKernelType>(-1);
}
}
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
::tflite::LSTMKernelType kernel_type = GetKernelType(op.kernel_type);
    return ::tflite::CreateLSTMOptions(*builder,
                                       ::tflite::ActivationFunctionType_TANH,
                                       /*cell_clip=*/0.0,
                                       /*proj_clip=*/0.0, kernel_type);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
CHECK(options.fused_activation_function() ==
::tflite::ActivationFunctionType_TANH);
switch (options.kernel_type()) {
case ::tflite::LSTMKernelType_BASIC:
op->kernel_type = LstmCellOperator::KERNEL_BASIC;
break;
case ::tflite::LSTMKernelType_FULL:
op->kernel_type = LstmCellOperator::KERNEL_FULL;
break;
}
}
int GetVersion(const OperatorSignature& op_signature) const override {
const auto& lstm_op =
static_cast<const LstmCellOperator&>(*op_signature.op);
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
TfLiteLSTMParams lstm_params = {};
lstm_params.kernel_type =
static_cast<TfLiteLSTMKernelType>(GetKernelType(lstm_op.kernel_type));
op_sig.builtin_data = reinterpret_cast<void*>(&lstm_params);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
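  // Marks which LSTM inputs are stateful variable tensors that the kernel
  // updates in place (activation state and cell state), depending on the
  // kernel type.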
std::vector<bool> GetMutatingInputVariables(
const Operator& op) const override {
const auto& lstm_op = static_cast<const LstmCellOperator&>(op);
std::vector<bool> mutating_input_variables(op.inputs.size(), false);
switch (lstm_op.kernel_type) {
case LstmCellOperator::KERNEL_FULL: {
mutating_input_variables[kInputActivationStateTensor] = true;
mutating_input_variables[kInputCellStateTensor] = true;
break;
}
case LstmCellOperator::KERNEL_BASIC: {
mutating_input_variables[LstmCellOperator::PREV_ACTIV_INPUT] = true;
mutating_input_variables[LstmCellOperator::PREV_STATE_INPUT] = true;
break;
}
}
return mutating_input_variables;
}
};
class UnidirectionalSequenceLstm
: public BuiltinOperator<
UnidirectionalSequenceLstmOperator,
::tflite::UnidirectionalSequenceLSTMOptions,
::tflite::BuiltinOptions_UnidirectionalSequenceLSTMOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
    return ::tflite::CreateUnidirectionalSequenceLSTMOptions(
        *builder,
        ::tflite::ActivationFunctionType_TANH,
        /*cell_clip=*/0.0,
        /*proj_clip=*/0.0,
        /*time_major=*/true);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
DCHECK(options.fused_activation_function() ==
::tflite::ActivationFunctionType_TANH);
}
std::vector<bool> GetMutatingInputVariables(
const Operator& op) const override {
std::vector<bool> mutating_input_variables(op.inputs.size(), false);
mutating_input_variables[kInputActivationStateTensor] = true;
mutating_input_variables[kInputCellStateTensor] = true;
return mutating_input_variables;
}
};
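// Bidirectional sequence LSTM: merge_outputs is the only option carried over
// from the TOCO operator; layout is fixed to time-major with TANH activation.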
class BidirectionalSequenceLstm
: public BuiltinOperator<
BidirectionalSequenceLstmOperator,
::tflite::BidirectionalSequenceLSTMOptions,
::tflite::BuiltinOptions_BidirectionalSequenceLSTMOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateBidirectionalSequenceLSTMOptions(
*builder,
::tflite::ActivationFunctionType_TANH,
/*cell_clip=*/0.0,
/*proj_clip=*/0.0,
op.merge_outputs,
/*time_major=*/true);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
DCHECK(options.fused_activation_function() ==
::tflite::ActivationFunctionType_TANH);
op->merge_outputs = options.merge_outputs();
}
std::vector<bool> GetMutatingInputVariables(
const Operator& op) const override {
std::vector<bool> mutating_input_variables(op.inputs.size(), false);
// Forward-direction input activation and cell state tensors.
mutating_input_variables[35] = true;
mutating_input_variables[36] = true;
// Backward-direction input activation and cell state tensors.
mutating_input_variables[37] = true;
mutating_input_variables[38] = true;
return mutating_input_variables;
}
};
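// Bidirectional sequence RNN mirrors the LSTM variant above: time-major,
// TANH activation, with merge_outputs round-tripped through the options.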
class BidirectionalSequenceRnn
: public BuiltinOperator<
BidirectionalSequenceRnnOperator,
::tflite::BidirectionalSequenceRNNOptions,
::tflite::BuiltinOptions_BidirectionalSequenceRNNOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateBidirectionalSequenceRNNOptions(
*builder, /*time_major=*/true,
::tflite::ActivationFunctionType_TANH,
op.merge_outputs);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
DCHECK(options.fused_activation_function() ==
::tflite::ActivationFunctionType_TANH);
op->merge_outputs = options.merge_outputs();
}
std::vector<bool> GetMutatingInputVariables(
const Operator& op) const override {
std::vector<bool> mutating_input_variables(op.inputs.size(), false);
// Forward and backward hidden state tensors are updated in place.
mutating_input_variables[4] = true;
mutating_input_variables[8] = true;
return mutating_input_variables;
}
};
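// The reducer operators below (Mean, Sum, ReduceProd/Max/Min/Any) all share
// ::tflite::ReducerOptions, whose only field is keep_dims.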
class Mean : public BuiltinOperator<MeanOperator, ::tflite::ReducerOptions,
::tflite::BuiltinOptions_ReducerOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateReducerOptions(*builder, op.keep_dims);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->keep_dims = options.keep_dims();
}
};
class Sum
: public BuiltinOperator<TensorFlowSumOperator, ::tflite::ReducerOptions,
::tflite::BuiltinOptions_ReducerOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateReducerOptions(*builder, op.keep_dims);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->keep_dims = options.keep_dims();
}
};
class ReduceMax
: public BuiltinOperator<TensorFlowMaxOperator, ::tflite::ReducerOptions,
::tflite::BuiltinOptions_ReducerOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateReducerOptions(*builder, op.keep_dims);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->keep_dims = options.keep_dims();
}
};
class ReduceMin
: public BuiltinOperator<TensorFlowMinOperator, ::tflite::ReducerOptions,
::tflite::BuiltinOptions_ReducerOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateReducerOptions(*builder, op.keep_dims);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->keep_dims = options.keep_dims();
}
};
class ReduceProd
: public BuiltinOperator<TensorFlowProdOperator, ::tflite::ReducerOptions,
::tflite::BuiltinOptions_ReducerOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateReducerOptions(*builder, op.keep_dims);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->keep_dims = options.keep_dims();
}
};
class ReduceAny
: public BuiltinOperator<TensorFlowAnyOperator, ::tflite::ReducerOptions,
::tflite::BuiltinOptions_ReducerOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateReducerOptions(*builder, op.keep_dims);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->keep_dims = options.keep_dims();
}
};
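// Resize operators round-trip align_corners and half_pixel_centers, and
// forward both flags through OpSignature so versioning can account for them.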
class ResizeBilinear
: public BuiltinOperator<ResizeBilinearOperator,
::tflite::ResizeBilinearOptions,
::tflite::BuiltinOptions_ResizeBilinearOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateResizeBilinearOptions(*builder, op.align_corners,
op.half_pixel_centers);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->align_corners = options.align_corners();
op->half_pixel_centers = options.half_pixel_centers();
}
int GetVersion(const OperatorSignature& op_signature) const override {
const auto& resize_bilinear_op =
static_cast<const ResizeBilinearOperator&>(*op_signature.op);
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
TfLiteResizeBilinearParams resize_bilinear_params = {};
resize_bilinear_params.half_pixel_centers =
resize_bilinear_op.half_pixel_centers;
resize_bilinear_params.align_corners = resize_bilinear_op.align_corners;
op_sig.builtin_data = reinterpret_cast<void*>(&resize_bilinear_params);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class ResizeNearestNeighbor
: public BuiltinOperator<
ResizeNearestNeighborOperator, ::tflite::ResizeNearestNeighborOptions,
::tflite::BuiltinOptions_ResizeNearestNeighborOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateResizeNearestNeighborOptions(
*builder, op.align_corners, op.half_pixel_centers);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->align_corners = options.align_corners();
op->half_pixel_centers = options.half_pixel_centers();
}
int GetVersion(const OperatorSignature& op_signature) const override {
const auto& resize_nn_op =
static_cast<const ResizeNearestNeighborOperator&>(*op_signature.op);
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
TfLiteResizeNearestNeighborParams resize_nearest_neighbor_params = {};
resize_nearest_neighbor_params.half_pixel_centers =
resize_nn_op.half_pixel_centers;
resize_nearest_neighbor_params.align_corners = resize_nn_op.align_corners;
op_sig.builtin_data =
reinterpret_cast<void*>(&resize_nearest_neighbor_params);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class Squeeze
: public BuiltinOperator<SqueezeOperator, ::tflite::SqueezeOptions,
::tflite::BuiltinOptions_SqueezeOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto squeeze_dims = builder->CreateVector(op.squeeze_dims);
return ::tflite::CreateSqueezeOptions(*builder, squeeze_dims);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->squeeze_dims.insert(op->squeeze_dims.end(),
options.squeeze_dims()->begin(),
options.squeeze_dims()->end());
}
};
class Split
: public BuiltinOperator<TensorFlowSplitOperator, ::tflite::SplitOptions,
::tflite::BuiltinOptions_SplitOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateSplitOptions(*builder, op.num_split);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->num_split = options.num_splits();
}
};
class SplitV
: public BuiltinOperator<TensorFlowSplitVOperator, ::tflite::SplitVOptions,
::tflite::BuiltinOptions_SplitVOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateSplitVOptions(*builder, op.num_split);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->num_split = options.num_splits();
}
};
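// StridedSlice versioning depends on the number of sliced dimensions and on
// the ellipsis/new-axis masks, which are passed through OpSignature.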
class StridedSlice
: public BuiltinOperator<StridedSliceOperator,
::tflite::StridedSliceOptions,
::tflite::BuiltinOptions_StridedSliceOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateStridedSliceOptions(
*builder, op.begin_mask, op.end_mask, op.ellipsis_mask,
op.new_axis_mask, op.shrink_axis_mask);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->begin_mask = options.begin_mask();
op->end_mask = options.end_mask();
op->ellipsis_mask = options.ellipsis_mask();
op->new_axis_mask = options.new_axis_mask();
op->shrink_axis_mask = options.shrink_axis_mask();
}
int GetVersion(const OperatorSignature& op_signature) const override {
const auto& ss_op =
static_cast<const StridedSliceOperator&>(*op_signature.op);
::tflite::OpSignature op_sig =
GetVersioningOpSig(builtin_op(), op_signature);
op_sig.ext_options.strided_slice.num_dims = ss_op.start_indices.size();
TfLiteStridedSliceParams strided_slice_params = {};
strided_slice_params.ellipsis_mask = ss_op.ellipsis_mask;
strided_slice_params.new_axis_mask = ss_op.new_axis_mask;
op_sig.builtin_data = reinterpret_cast<void*>(&strided_slice_params);
return ::tflite::GetBuiltinOperatorVersion(op_sig);
}
};
class TopK_V2 : public BuiltinOperator<TopKV2Operator, ::tflite::TopKV2Options,
::tflite::BuiltinOptions_TopKV2Options> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateTopKV2Options(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
class ArgMax : public BuiltinOperator<ArgMaxOperator, ::tflite::ArgMaxOptions,
::tflite::BuiltinOptions_ArgMaxOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateArgMaxOptions(
*builder, DataType::Serialize(op.output_data_type));
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->output_data_type = DataType::Deserialize(options.output_type());
}
};
class ArgMin : public BuiltinOperator<ArgMinOperator, ::tflite::ArgMinOptions,
::tflite::BuiltinOptions_ArgMinOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateArgMinOptions(
*builder, DataType::Serialize(op.output_data_type));
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->output_data_type = DataType::Deserialize(options.output_type());
}
};
class TransposeConv
: public BuiltinOperator<TransposeConvOperator,
::tflite::TransposeConvOptions,
::tflite::BuiltinOptions_TransposeConvOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto padding = Padding::Serialize(op.padding.type);
return ::tflite::CreateTransposeConvOptions(
*builder, padding, op.stride_width, op.stride_height);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->padding.type = Padding::Deserialize(options.padding());
op->stride_width = options.stride_w();
op->stride_height = options.stride_h();
}
};
class SparseToDense
: public BuiltinOperator<SparseToDenseOperator,
::tflite::SparseToDenseOptions,
::tflite::BuiltinOptions_SparseToDenseOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateSparseToDenseOptions(*builder, op.validate_indices);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->validate_indices = options.validate_indices();
}
};
class ExpandDims
: public BuiltinOperator<ExpandDimsOperator, ::tflite::ExpandDimsOptions,
::tflite::BuiltinOptions_ExpandDimsOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateExpandDimsOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
class Pack : public BuiltinOperator<PackOperator, ::tflite::PackOptions,
::tflite::BuiltinOptions_PackOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreatePackOptions(*builder, op.values_count, op.axis);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->values_count = options.values_count();
op->axis = options.axis();
}
};
class Shape
: public BuiltinOperator<TensorFlowShapeOperator, ::tflite::ShapeOptions,
::tflite::BuiltinOptions_ShapeOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateShapeOptions(
*builder, DataType::Serialize(op.output_data_type));
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->output_data_type = DataType::Deserialize(options.out_type());
}
};
class OneHot : public BuiltinOperator<OneHotOperator, ::tflite::OneHotOptions,
::tflite::BuiltinOptions_OneHotOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateOneHotOptions(*builder, op.axis);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->axis = options.axis();
}
};
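// CTC beam search decoder is exported as a custom op; its options are encoded
// as a flexbuffer map of beam_width, top_paths and merge_repeated.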
class CTCBeamSearchDecoder
: public CustomOperator<CTCBeamSearchDecoderOperator> {
public:
using CustomOperator::CustomOperator;
void WriteOptions(const TocoOperator& op,
flexbuffers::Builder* fbb) const override {
fbb->Int("beam_width", op.beam_width);
fbb->Int("top_paths", op.top_paths);
fbb->Bool("merge_repeated", op.merge_repeated);
}
void ReadOptions(const flexbuffers::Map& m, TocoOperator* op) const override {
op->beam_width = m["beam_width"].AsInt32();
op->top_paths = m["top_paths"].AsInt32();
op->merge_repeated = m["merge_repeated"].AsBool();
}
int GetVersion(const OperatorSignature& op_signature) const override {
return 1;
}
};
class Unpack : public BuiltinOperator<UnpackOperator, ::tflite::UnpackOptions,
::tflite::BuiltinOptions_UnpackOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateUnpackOptions(*builder, op.num, op.axis);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->num = options.num();
op->axis = options.axis();
}
int GetVersion(const OperatorSignature& op_signature) const override {
const std::string& input_name = op_signature.op->inputs[0];
const Array& input_array = op_signature.model->GetArray(input_name);
if (input_array.data_type == ArrayDataType::kInt8 ||
input_array.data_type == ArrayDataType::kUint8) {
return 2;
}
if (input_array.data_type == ArrayDataType::kBool) {
return 3;
}
return 1;
}
};
class LeakyRelu
: public BuiltinOperator<LeakyReluOperator, ::tflite::LeakyReluOptions,
::tflite::BuiltinOptions_LeakyReluOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateLeakyReluOptions(*builder, op.alpha);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->alpha = options.alpha();
}
};
class SquaredDifference
: public BuiltinOperator<
SquaredDifferenceOperator, ::tflite::SquaredDifferenceOptions,
::tflite::BuiltinOptions_SquaredDifferenceOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateSquaredDifferenceOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
class MirrorPad
: public BuiltinOperator<MirrorPadOperator, ::tflite::MirrorPadOptions,
::tflite::BuiltinOptions_MirrorPadOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateMirrorPadOptions(
*builder, op.mode == MirrorPadMode::kReflect
? ::tflite::MirrorPadMode::MirrorPadMode_REFLECT
: ::tflite::MirrorPadMode::MirrorPadMode_SYMMETRIC);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->mode = options.mode() == ::tflite::MirrorPadMode::MirrorPadMode_REFLECT
? MirrorPadMode::kReflect
: MirrorPadMode::kSymmetric;
}
};
class Unique : public BuiltinOperator<UniqueOperator, ::tflite::UniqueOptions,
::tflite::BuiltinOptions_UniqueOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
const UniqueOperator& unique_op = static_cast<const UniqueOperator&>(op);
return ::tflite::CreateUniqueOptions(
*builder, unique_op.idx_out_type == toco::ArrayDataType::kInt64
? ::tflite::TensorType::TensorType_INT64
: ::tflite::TensorType_INT32);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
UniqueOperator* unique_op = static_cast<UniqueOperator*>(op);
unique_op->idx_out_type =
options.idx_out_type() == ::tflite::TensorType_INT64
? toco::ArrayDataType::kInt64
: toco::ArrayDataType::kInt32;
}
};
class UnidirectionalSequenceRnn
: public BuiltinOperator<UnidirectionalSequenceRnnOperator,
::tflite::SequenceRNNOptions,
::tflite::BuiltinOptions_SequenceRNNOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateSequenceRNNOptions(
*builder, /*time_major=*/true,
::tflite::ActivationFunctionType_TANH);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
DCHECK(options.fused_activation_function() ==
::tflite::ActivationFunctionType_TANH);
}
std::vector<bool> GetMutatingInputVariables(
const Operator& op) const override {
std::vector<bool> mutating_input_variables(op.inputs.size(), false);
// The hidden state tensor is updated in place.
mutating_input_variables[4] = true;
return mutating_input_variables;
}
};
class Where : public BuiltinOperator<WhereOperator, ::tflite::WhereOptions,
::tflite::BuiltinOptions_WhereOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateWhereOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
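// Builds the custom-options payload for a flex (select TF ops) operator: a
// flexbuffer vector holding the op name followed by the serialized NodeDef.
// Returns an empty pointer if the NodeDef cannot be parsed.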
std::unique_ptr<flexbuffers::Builder> WriteFlexOpOptions(
const std::string& tensorflow_node_def) {
auto fbb = std::make_unique<flexbuffers::Builder>();
::tensorflow::NodeDef node_def;
if (!node_def.ParseFromString(tensorflow_node_def)) {
LOG(ERROR) << "Failed to parse TensorFlow NodeDef";
return {};
}
fbb->Vector([&]() {
fbb->String(node_def.op());
fbb->String(tensorflow_node_def);
});
fbb->Finish();
LOG(INFO) << "Writing flex op: " << node_def.op();
return std::unique_ptr<flexbuffers::Builder>(fbb.release());
}
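// Fallback serializer for TensorFlow ops without a TF Lite counterpart.
// Depending on enable_select_tf_ops it either emits flex-op custom options
// (op name + NodeDef) or a flexbuffer map of the NodeDef's scalar and list
// attributes, and rebuilds a NodeDef from that map when deserializing.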
class TensorFlowUnsupported : public BaseOperator {
public:
TensorFlowUnsupported(const std::string& name, OperatorType type,
bool enable_select_tf_ops)
: BaseOperator(name, type), enable_select_tf_ops_(enable_select_tf_ops) {}
Options Serialize(const Operator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
auto fbb =
WriteOptions(static_cast<const TensorFlowUnsupportedOperator&>(op));
if (fbb) {
return Options::Custom(builder->CreateVector(fbb->GetBuffer()));
} else {
return Options::Custom(0);
}
}
std::unique_ptr<Operator> Deserialize(
const BuiltinOptions* builtin_options,
const CustomOptions* custom_options) const override {
auto op = std::make_unique<TensorFlowUnsupportedOperator>();
if (custom_options) {
auto flexbuffer_map =
flexbuffers::GetRoot(custom_options->data(), custom_options->size())
.AsMap();
ReadOptions(flexbuffer_map, op.get());
}
return std::unique_ptr<Operator>(op.release());
}
std::unique_ptr<flexbuffers::Builder> WriteOptions(
const TensorFlowUnsupportedOperator& op) const {
if (enable_select_tf_ops_) {
return WriteFlexOpOptions(op.tensorflow_node_def);
}
auto fbb = std::make_unique<flexbuffers::Builder>();
::tensorflow::NodeDef node_def;
if (!node_def.ParseFromString(op.tensorflow_node_def)) {
LOG(ERROR) << "Failed to parse TensorFlow NodeDef";
return std::unique_ptr<flexbuffers::Builder>();
}
if (ShouldExportAsFlexOp(enable_select_tf_ops_, node_def.op())) {
fbb->Vector([&]() {
fbb->String(node_def.op());
fbb->String(op.tensorflow_node_def);
});
fbb->Finish();
LOG(INFO) << "Writing flex op: " << node_def.op();
return std::unique_ptr<flexbuffers::Builder>(fbb.release());
}
bool has_valid_attr = false;
size_t map_start = fbb->StartMap();
for (const auto& pair : node_def.attr()) {
const char* key = pair.first.c_str();
const auto& attr = pair.second;
switch (attr.value_case()) {
case ::tensorflow::AttrValue::kS:
fbb->String(key, attr.s());
has_valid_attr = true;
break;
case ::tensorflow::AttrValue::kI:
fbb->Int(key, attr.i());
has_valid_attr = true;
break;
case ::tensorflow::AttrValue::kF:
fbb->Float(key, attr.f());
has_valid_attr = true;
break;
case ::tensorflow::AttrValue::kB:
fbb->Bool(key, attr.b());
has_valid_attr = true;
break;
case tensorflow::AttrValue::kList:
if (attr.list().s_size() > 0) {
auto start = fbb->StartVector(key);
for (const std::string& v : attr.list().s()) {
fbb->Add(v);
}
fbb->EndVector(start, true, false);
has_valid_attr = true;
} else if (attr.list().i_size() > 0) {
auto start = fbb->StartVector(key);
for (const int64_t v : attr.list().i()) {
fbb->Add(v);
}
fbb->EndVector(start, true, false);
has_valid_attr = true;
} else if (attr.list().f_size() > 0) {
auto start = fbb->StartVector(key);
for (const float v : attr.list().f()) {
fbb->Add(v);
}
fbb->EndVector(start, true, false);
has_valid_attr = true;
} else {
LOG(WARNING)
<< "Ignoring unsupported type in list attribute with key '"
<< key << "'";
}
break;
default:
LOG(WARNING) << "Ignoring unsupported attribute type with key '"
<< key << "'";
break;
}
}
if (!has_valid_attr) {
return std::unique_ptr<flexbuffers::Builder>();
}
fbb->EndMap(map_start);
fbb->Finish();
return std::unique_ptr<flexbuffers::Builder>(fbb.release());
}
void ReadOptions(const flexbuffers::Map& m,
TensorFlowUnsupportedOperator* op) const {
::tensorflow::NodeDef node_def;
auto attr = node_def.mutable_attr();
const auto& keys = m.Keys();
for (size_t i = 0; i < keys.size(); ++i) {
const auto key = keys[i].AsKey();
const auto& value = m[key];
switch (value.GetType()) {
case flexbuffers::FBT_STRING:
(*attr)[key].set_s(value.AsString().c_str());
break;
case flexbuffers::FBT_INT:
(*attr)[key].set_i(value.AsInt64());
break;
case flexbuffers::FBT_FLOAT:
(*attr)[key].set_f(value.AsFloat());
break;
case flexbuffers::FBT_BOOL:
(*attr)[key].set_b(value.AsBool());
if (std::string(key) == "_output_quantized") {
op->quantized = value.AsBool();
}
if (std::string(key) ==
"_support_output_type_float_in_quantized_op") {
op->support_output_type_float_in_quantized_op = value.AsBool();
}
break;
case flexbuffers::FBT_VECTOR_INT: {
auto* list = (*attr)[key].mutable_list();
const auto& vector = value.AsTypedVector();
for (size_t i = 0; i < vector.size(); i++) {
list->add_i(vector[i].AsInt64());
}
break;
}
case flexbuffers::FBT_VECTOR_FLOAT: {
auto* list = (*attr)[key].mutable_list();
const auto& vector = value.AsTypedVector();
for (size_t i = 0; i < vector.size(); i++) {
list->add_f(vector[i].AsFloat());
}
break;
}
case 15: {  // Deprecated flexbuffers string-vector type (FBT_VECTOR_STRING_DEPRECATED).
auto* list = (*attr)[key].mutable_list();
const auto& vector = value.AsTypedVector();
for (size_t i = 0; i < vector.size(); i++) {
list->add_s(vector[i].AsString().str());
}
break;
}
default:
LOG(WARNING) << "Ignoring unsupported attribute type with key '"
<< key << "'";
break;
}
}
node_def.SerializeToString(&op->tensorflow_node_def);
}
int GetVersion(const OperatorSignature& op_signature) const override {
return 1;
}
private:
const bool enable_select_tf_ops_;
};
class Dequantize
: public BuiltinOperator<DequantizeOperator, ::tflite::DequantizeOptions,
::tflite::BuiltinOptions_DequantizeOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateDequantizeOptions(*builder);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {}
};
class ReverseSequence
: public BuiltinOperator<ReverseSequenceOperator,
::tflite::ReverseSequenceOptions,
::tflite::BuiltinOptions_ReverseSequenceOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateReverseSequenceOptions(*builder, op.seq_dim,
op.batch_dim);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->seq_dim = options.seq_dim();
op->batch_dim = options.batch_dim();
}
};
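// Central registry of all operator serializers; the name- and type-keyed maps
// below are built from this single list so that the two stay in sync.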
namespace {
std::vector<std::unique_ptr<BaseOperator>> BuildOperatorList(
bool enable_select_tf_ops = false) {
std::vector<std::unique_ptr<BaseOperator>> ops;
ops.push_back(
std::make_unique<Add>(::tflite::BuiltinOperator_ADD, OperatorType::kAdd));
ops.push_back(std::make_unique<AddN>(::tflite::BuiltinOperator_ADD_N,
OperatorType::kAddN));
ops.push_back(
std::make_unique<Div>(::tflite::BuiltinOperator_DIV, OperatorType::kDiv));
ops.push_back(
std::make_unique<Sub>(::tflite::BuiltinOperator_SUB, OperatorType::kSub));
ops.push_back(std::make_unique<AveragePool>(
::tflite::BuiltinOperator_AVERAGE_POOL_2D, OperatorType::kAveragePool));
ops.push_back(std::make_unique<SpaceToBatchND>(
::tflite::BuiltinOperator_SPACE_TO_BATCH_ND,
OperatorType::kSpaceToBatchND));
ops.push_back(std::make_unique<BatchToSpaceND>(
::tflite::BuiltinOperator_BATCH_TO_SPACE_ND,
OperatorType::kBatchToSpaceND));
ops.push_back(std::make_unique<Concatenation>(
::tflite::BuiltinOperator_CONCATENATION, OperatorType::kConcatenation));
ops.push_back(std::make_unique<Convolution>(::tflite::BuiltinOperator_CONV_2D,
OperatorType::kConv));
ops.push_back(std::make_unique<DepthwiseConvolution>(
::tflite::BuiltinOperator_DEPTHWISE_CONV_2D,
OperatorType::kDepthwiseConv));
ops.push_back(std::make_unique<Dequantize>(
::tflite::BuiltinOperator_DEQUANTIZE, OperatorType::kDequantize));
ops.push_back(std::make_unique<FullyConnected>(
::tflite::BuiltinOperator_FULLY_CONNECTED,
OperatorType::kFullyConnected));
ops.push_back(std::make_unique<Gather>(::tflite::BuiltinOperator_GATHER,
OperatorType::kGather));
ops.push_back(std::make_unique<GatherNd>(::tflite::BuiltinOperator_GATHER_ND,
OperatorType::kGatherNd));
ops.push_back(std::make_unique<L2Normalization>(
::tflite::BuiltinOperator_L2_NORMALIZATION,
OperatorType::kL2Normalization));
ops.push_back(std::make_unique<L2Pool>(::tflite::BuiltinOperator_L2_POOL_2D,
OperatorType::kL2Pool));
ops.push_back(std::make_unique<LocalResponseNormalization>(
::tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
OperatorType::kLocalResponseNormalization));
ops.push_back(std::make_unique<MaxPool>(::tflite::BuiltinOperator_MAX_POOL_2D,
OperatorType::kMaxPool));
ops.push_back(
std::make_unique<Mul>(::tflite::BuiltinOperator_MUL, OperatorType::kMul));
ops.push_back(
std::make_unique<Pad>(::tflite::BuiltinOperator_PAD, OperatorType::kPad));
ops.push_back(std::make_unique<PadV2>(::tflite::BuiltinOperator_PADV2,
OperatorType::kPadV2));
ops.push_back(std::make_unique<Reshape>(::tflite::BuiltinOperator_RESHAPE,
OperatorType::kReshape));
ops.push_back(std::make_unique<Softmax>(::tflite::BuiltinOperator_SOFTMAX,
OperatorType::kSoftmax));
ops.push_back(std::make_unique<SpaceToDepth>(
::tflite::BuiltinOperator_SPACE_TO_DEPTH, OperatorType::kSpaceToDepth));
ops.push_back(std::make_unique<DepthToSpace>(
::tflite::BuiltinOperator_DEPTH_TO_SPACE, OperatorType::kDepthToSpace));
ops.push_back(std::make_unique<Svdf>(::tflite::BuiltinOperator_SVDF,
OperatorType::kSvdf));
ops.push_back(std::make_unique<Transpose>(::tflite::BuiltinOperator_TRANSPOSE,
OperatorType::kTranspose));
ops.push_back(std::make_unique<Mean>(::tflite::BuiltinOperator_MEAN,
OperatorType::kMean));
ops.push_back(
std::make_unique<Sum>(::tflite::BuiltinOperator_SUM, OperatorType::kSum));
ops.push_back(std::make_unique<ReduceProd>(
::tflite::BuiltinOperator_REDUCE_PROD, OperatorType::kReduceProd));
ops.push_back(std::make_unique<ReduceMax>(
::tflite::BuiltinOperator_REDUCE_MAX, OperatorType::kReduceMax));
ops.push_back(std::make_unique<ReduceMin>(
::tflite::BuiltinOperator_REDUCE_MIN, OperatorType::kReduceMin));
ops.push_back(std::make_unique<ReduceAny>(
::tflite::BuiltinOperator_REDUCE_ANY, OperatorType::kAny));
ops.push_back(std::make_unique<ResizeBilinear>(
::tflite::BuiltinOperator_RESIZE_BILINEAR,
OperatorType::kResizeBilinear));
ops.push_back(std::make_unique<ResizeNearestNeighbor>(
::tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
OperatorType::kResizeNearestNeighbor));
ops.push_back(std::make_unique<Squeeze>(::tflite::BuiltinOperator_SQUEEZE,
OperatorType::kSqueeze));
ops.push_back(std::make_unique<Split>(::tflite::BuiltinOperator_SPLIT,
OperatorType::kSplit));
ops.push_back(std::make_unique<SplitV>(::tflite::BuiltinOperator_SPLIT_V,
OperatorType::kSplitV));
ops.push_back(std::make_unique<StridedSlice>(
::tflite::BuiltinOperator_STRIDED_SLICE, OperatorType::kStridedSlice));
ops.push_back(std::make_unique<TopK_V2>(::tflite::BuiltinOperator_TOPK_V2,
OperatorType::kTopK_V2));
ops.push_back(std::make_unique<Lstm>(::tflite::BuiltinOperator_LSTM,
OperatorType::kLstmCell));
ops.push_back(std::make_unique<Cast>(::tflite::BuiltinOperator_CAST,
OperatorType::kCast));
ops.push_back(std::make_unique<ArgMax>(::tflite::BuiltinOperator_ARG_MAX,
OperatorType::kArgMax));
ops.push_back(std::make_unique<ArgMin>(::tflite::BuiltinOperator_ARG_MIN,
OperatorType::kArgMin));
ops.push_back(std::make_unique<Tile>(::tflite::BuiltinOperator_TILE,
OperatorType::kTile));
ops.push_back(std::make_unique<ExpandDims>(
::tflite::BuiltinOperator_EXPAND_DIMS, OperatorType::kExpandDims));
ops.push_back(std::make_unique<TransposeConv>(
::tflite::BuiltinOperator_TRANSPOSE_CONV, OperatorType::kTransposeConv));
ops.push_back(std::make_unique<SparseToDense>(
::tflite::BuiltinOperator_SPARSE_TO_DENSE, OperatorType::kSparseToDense));
ops.push_back(std::make_unique<Shape>(::tflite::BuiltinOperator_SHAPE,
OperatorType::kShape));
ops.push_back(std::make_unique<FakeQuant>(
::tflite::BuiltinOperator_FAKE_QUANT, OperatorType::kFakeQuant));
ops.push_back(std::make_unique<Pack>(::tflite::BuiltinOperator_PACK,
OperatorType::kPack));
ops.emplace_back(std::make_unique<UnidirectionalSequenceLstm>(
::tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM,
OperatorType::kUnidirectionalSequenceLstm));
ops.emplace_back(std::make_unique<BidirectionalSequenceLstm>(
::tflite::BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM,
OperatorType::kBidirectionalSequenceLstm));
ops.emplace_back(std::make_unique<BidirectionalSequenceRnn>(
::tflite::BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN,
OperatorType::kBidirectionalSequenceRnn));
ops.push_back(std::make_unique<OneHot>(::tflite::BuiltinOperator_ONE_HOT,
OperatorType::kOneHot));
ops.push_back(std::make_unique<Unpack>(::tflite::BuiltinOperator_UNPACK,
OperatorType::kUnpack));
ops.push_back(std::make_unique<LeakyRelu>(
::tflite::BuiltinOperator_LEAKY_RELU, OperatorType::kLeakyRelu));
ops.push_back(std::make_unique<SquaredDifference>(
::tflite::BuiltinOperator_SQUARED_DIFFERENCE,
OperatorType::kSquaredDifference));
ops.push_back(std::make_unique<MirrorPad>(
::tflite::BuiltinOperator_MIRROR_PAD, OperatorType::kMirrorPad));
ops.push_back(std::make_unique<Unique>(::tflite::BuiltinOperator_UNIQUE,
OperatorType::kUnique));
ops.push_back(std::make_unique<UnidirectionalSequenceRnn>(
::tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN,
OperatorType::kUnidirectionalSequenceRnn));
ops.push_back(std::make_unique<Where>(::tflite::BuiltinOperator_WHERE,
OperatorType::kWhere));
ops.push_back(std::make_unique<ReverseSequence>(
::tflite::BuiltinOperator_REVERSE_SEQUENCE,
OperatorType::kReverseSequence));
ops.push_back(std::make_unique<SimpleOperator<MatrixDiagOperator>>(
::tflite::BuiltinOperator_MATRIX_DIAG, OperatorType::kMatrixDiag));
ops.push_back(std::make_unique<SimpleOperator<MatrixSetDiagOperator>>(
::tflite::BuiltinOperator_MATRIX_SET_DIAG, OperatorType::kMatrixSetDiag));
ops.push_back(std::make_unique<CTCBeamSearchDecoder>(
"CTC_BEAM_SEARCH_DECODER", OperatorType::kCTCBeamSearchDecoder));
ops.push_back(std::make_unique<TensorFlowUnsupported>(
"TENSORFLOW_UNSUPPORTED", OperatorType::kUnsupported,
enable_select_tf_ops));
ops.push_back(std::make_unique<SimpleOperator<FloorOperator>>(
::tflite::BuiltinOperator_FLOOR, OperatorType::kFloor));
ops.push_back(std::make_unique<SimpleOperator<CeilOperator>>(
::tflite::BuiltinOperator_CEIL, OperatorType::kCeil));
ops.push_back(std::make_unique<SimpleOperator<EluOperator>>(
::tflite::BuiltinOperator_ELU, OperatorType::kElu));
ops.push_back(std::make_unique<SimpleOperator<RoundOperator>>(
::tflite::BuiltinOperator_ROUND, OperatorType::kRound));
ops.push_back(std::make_unique<SimpleOperator<ReluOperator>>(
::tflite::BuiltinOperator_RELU, OperatorType::kRelu));
ops.push_back(std::make_unique<SimpleOperator<Relu1Operator>>(
::tflite::BuiltinOperator_RELU_N1_TO_1, OperatorType::kRelu1));
ops.push_back(std::make_unique<SimpleOperator<Relu6Operator>>(
::tflite::BuiltinOperator_RELU6, OperatorType::kRelu6));
ops.push_back(std::make_unique<SimpleOperator<PReluOperator>>(
::tflite::BuiltinOperator_PRELU, OperatorType::kPRelu));
ops.push_back(std::make_unique<SimpleOperator<LogisticOperator>>(
::tflite::BuiltinOperator_LOGISTIC, OperatorType::kLogistic));
ops.push_back(std::make_unique<SimpleOperator<TanhOperator>>(
::tflite::BuiltinOperator_TANH, OperatorType::kTanh));
ops.push_back(std::make_unique<SimpleOperator<ExpOperator>>(
::tflite::BuiltinOperator_EXP, OperatorType::kExp));
ops.push_back(std::make_unique<SimpleOperator<CosOperator>>(
::tflite::BuiltinOperator_COS, OperatorType::kCos));
ops.push_back(std::make_unique<SimpleOperator<LogSoftmaxOperator>>(
::tflite::BuiltinOperator_LOG_SOFTMAX, OperatorType::kLogSoftmax));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowMaximumOperator>>(
::tflite::BuiltinOperator_MAXIMUM, OperatorType::kMaximum));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowMinimumOperator>>(
::tflite::BuiltinOperator_MINIMUM, OperatorType::kMinimum));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowGreaterOperator>>(
::tflite::BuiltinOperator_GREATER, OperatorType::kGreater));
ops.push_back(
std::make_unique<SimpleOperator<TensorFlowGreaterEqualOperator>>(
::tflite::BuiltinOperator_GREATER_EQUAL,
OperatorType::kGreaterEqual));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowLessOperator>>(
::tflite::BuiltinOperator_LESS, OperatorType::kLess));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowLessEqualOperator>>(
::tflite::BuiltinOperator_LESS_EQUAL, OperatorType::kLessEqual));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowEqualOperator>>(
::tflite::BuiltinOperator_EQUAL, OperatorType::kEqual));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowNotEqualOperator>>(
::tflite::BuiltinOperator_NOT_EQUAL, OperatorType::kNotEqual));
ops.push_back(std::make_unique<SimpleOperator<NegOperator>>(
::tflite::BuiltinOperator_NEG, OperatorType::kNeg));
ops.push_back(std::make_unique<SimpleOperator<SelectOperator>>(
::tflite::BuiltinOperator_SELECT, OperatorType::kSelect));
ops.push_back(std::make_unique<SimpleOperator<SliceOperator>>(
::tflite::BuiltinOperator_SLICE, OperatorType::kSlice));
ops.push_back(std::make_unique<SimpleOperator<PowOperator>>(
::tflite::BuiltinOperator_POW, OperatorType::kPow));
ops.push_back(std::make_unique<SimpleOperator<LogicalOrOperator>>(
::tflite::BuiltinOperator_LOGICAL_OR, OperatorType::kLogicalOr));
ops.emplace_back(new SimpleOperator<LogicalAndOperator>(
::tflite::BuiltinOperator_LOGICAL_AND, OperatorType::kLogicalAnd));
ops.emplace_back(new SimpleOperator<LogicalNotOperator>(
::tflite::BuiltinOperator_LOGICAL_NOT, OperatorType::kLogicalNot));
ops.emplace_back(new SimpleOperator<FloorDivOperator>(
::tflite::BuiltinOperator_FLOOR_DIV, OperatorType::kFloorDiv));
ops.emplace_back(new SimpleOperator<FloorModOperator>(
::tflite::BuiltinOperator_FLOOR_MOD, OperatorType::kFloorMod));
ops.emplace_back(new SimpleOperator<RangeOperator>(
::tflite::BuiltinOperator_RANGE, OperatorType::kRange));
ops.push_back(std::make_unique<SimpleOperator<SinOperator>>(
::tflite::BuiltinOperator_SIN, OperatorType::kSin));
ops.push_back(std::make_unique<SimpleOperator<LogOperator>>(
::tflite::BuiltinOperator_LOG, OperatorType::kLog));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowSqrtOperator>>(
::tflite::BuiltinOperator_SQRT, OperatorType::kSqrt));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowRsqrtOperator>>(
::tflite::BuiltinOperator_RSQRT, OperatorType::kRsqrt));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowSquareOperator>>(
::tflite::BuiltinOperator_SQUARE, OperatorType::kSquare));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowZerosLikeOperator>>(
::tflite::BuiltinOperator_ZEROS_LIKE, OperatorType::kZerosLike));
ops.push_back(std::make_unique<SimpleOperator<AbsOperator>>(
::tflite::BuiltinOperator_ABS, OperatorType::kAbs));
ops.push_back(std::make_unique<SimpleOperator<HardSwishOperator>>(
::tflite::BuiltinOperator_HARD_SWISH, OperatorType::kHardSwish));
ops.push_back(std::make_unique<SimpleOperator<FillOperator>>(
::tflite::BuiltinOperator_FILL, OperatorType::kFill));
ops.push_back(std::make_unique<SimpleOperator<ReverseV2Operator>>(
::tflite::BuiltinOperator_REVERSE_V2, OperatorType::kReverseV2));
ops.push_back(std::make_unique<SimpleOperator<TensorFlowRankOperator>>(
::tflite::BuiltinOperator_RANK, OperatorType::kRank));
ops.emplace_back(new SimpleOperator<SegmentSumOperator>(
::tflite::BuiltinOperator_SEGMENT_SUM, OperatorType::kSegmentSum));
ops.emplace_back(std::make_unique<SimpleOperator<ScatterNdOperator>>(
::tflite::BuiltinOperator_SCATTER_ND, OperatorType::kScatterNd));
return ops;
}
}  // namespace
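// Convenience lookups over the registry above, keyed by operator type and by
// TF Lite operator name respectively.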
std::map<OperatorType, std::unique_ptr<BaseOperator>> BuildOperatorByTypeMap(
bool enable_select_tf_ops) {
std::map<OperatorType, std::unique_ptr<BaseOperator>> result;
std::vector<std::unique_ptr<BaseOperator>> ops =
BuildOperatorList(enable_select_tf_ops);
for (auto& op : ops) {
result[op->type()] = std::move(op);
}
return result;
}
std::map<std::string, std::unique_ptr<BaseOperator>> BuildOperatorByNameMap(
bool enable_select_tf_ops) {
std::map<std::string, std::unique_ptr<BaseOperator>> result;
std::vector<std::unique_ptr<BaseOperator>> ops =
BuildOperatorList(enable_select_tf_ops);
for (auto& op : ops) {
result[op->name()] = std::move(op);
}
return result;
}
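// A TensorFlow op is exported as a flex op only when select TF ops are
// enabled, the op exists in the TensorFlow op registry, and it is on the
// TF Lite flex allowlist.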
bool ShouldExportAsFlexOp(bool enable_select_tf_ops,
const std::string& tensorflow_op_name) {
if (!enable_select_tf_ops) {
return false;
}
const tensorflow::OpDef* op_def = nullptr;
if (!tensorflow::OpRegistry::Global()
->LookUpOpDef(tensorflow_op_name, &op_def)
.ok()) {
return false;
}
if (!::tflite::flex::IsAllowlistedFlexOp(tensorflow_op_name)) {
LOG(WARNING) << "Op " << tensorflow_op_name
<< " is a valid TensorFlow op but has not been allowlisted for"
" the TensorFlow Lite flex op set.";
return false;
}
return true;
}
}  // namespace tflite
}  // namespace toco
#include "tensorflow/lite/toco/tflite/operator.h"
#include <string>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/runtime/types.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
namespace tflite {
namespace {
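// Round-trip fixture: each operator's options are serialized into a
// flatbuffer, deserialized again, and compared field by field against the
// original TOCO operator.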
class OperatorTest : public ::testing::Test {
protected:
const BaseOperator& GetOperator(const std::string& name, OperatorType type) {
using OpsByName = std::map<std::string, std::unique_ptr<BaseOperator>>;
using OpsByType = std::map<OperatorType, std::unique_ptr<BaseOperator>>;
static auto* by_name = new OpsByName(BuildOperatorByNameMap());
static auto* by_type = new OpsByType(BuildOperatorByTypeMap());
CHECK(by_name->count(name)) << "No operator for '" << name << "'.";
BaseOperator* op1 = by_name->at(name).get();
CHECK(op1->type() == type) << "while verifying '" << name << "'.";
CHECK(by_type->count(type))
<< "No operator for '" << OperatorTypeName(type) << "'.";
BaseOperator* op2 = by_type->at(type).get();
CHECK(op2->name() == name)
<< "while verifying '" << OperatorTypeName(type) << "'.";
return *op1;
}
template <typename T>
std::unique_ptr<T> SerializeAndDeserialize(const BaseOperator& op,
const T& toco_op,
Options* options = nullptr) {
flatbuffers::FlatBufferBuilder builder;
Options input_options = op.Serialize(toco_op, &builder);
if (options) {
*options = input_options;
}
builder.Finish(CreateOperator(builder, 0, 0, 0, input_options.type,
input_options.builtin, input_options.custom,
::tflite::CustomOptionsFormat_FLEXBUFFERS));
auto* output_options =
flatbuffers::GetRoot<::tflite::Operator>(builder.GetBufferPointer());
auto new_toco_op = op.Deserialize(output_options->builtin_options(),
output_options->custom_options());
CHECK(new_toco_op->type == toco_op.type)
<< "The type of the serialized and deserialized"
<< HelpfulOperatorTypeName(*new_toco_op)
<< " does not match the type of the original "
<< HelpfulOperatorTypeName(toco_op);
return std::unique_ptr<T>(dynamic_cast<T*>(new_toco_op.release()));
}
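// "Simple" operators carry no options, so the round trip only verifies that
// no builtin or custom options were written and that the type survives.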
template <typename T>
void CheckSimpleOperator(const std::string& name, OperatorType type) {
Options options;
auto output_toco_op =
SerializeAndDeserialize(GetOperator(name, type), T(), &options);
ASSERT_EQ(0, options.builtin.o);
ASSERT_EQ(0, options.custom.o);
ASSERT_EQ(::tflite::BuiltinOptions_NONE, options.type);
ASSERT_NE(nullptr, output_toco_op.get());
}
template <typename T>
void CheckReducerOperator(const std::string& name, OperatorType type) {
T op;
op.keep_dims = false;
auto output_toco_op = SerializeAndDeserialize(GetOperator(name, type), op);
EXPECT_EQ(op.keep_dims, output_toco_op->keep_dims);
}
};
TEST_F(OperatorTest, SimpleOperators) {
CheckSimpleOperator<FloorOperator>("FLOOR", OperatorType::kFloor);
CheckSimpleOperator<CeilOperator>("CEIL", OperatorType::kCeil);
CheckSimpleOperator<EluOperator>("ELU", OperatorType::kElu);
CheckSimpleOperator<RoundOperator>("ROUND", OperatorType::kRound);
CheckSimpleOperator<ReluOperator>("RELU", OperatorType::kRelu);
CheckSimpleOperator<Relu1Operator>("RELU_N1_TO_1", OperatorType::kRelu1);
CheckSimpleOperator<Relu6Operator>("RELU6", OperatorType::kRelu6);
CheckSimpleOperator<LogisticOperator>("LOGISTIC", OperatorType::kLogistic);
CheckSimpleOperator<TanhOperator>("TANH", OperatorType::kTanh);
CheckSimpleOperator<ExpOperator>("EXP", OperatorType::kExp);
CheckSimpleOperator<CosOperator>("COS", OperatorType::kCos);
CheckSimpleOperator<LogSoftmaxOperator>("LOG_SOFTMAX",
OperatorType::kLogSoftmax);
CheckSimpleOperator<TensorFlowMaximumOperator>(
"MAXIMUM", OperatorType::kMaximum);
CheckSimpleOperator<TensorFlowMinimumOperator>(
"MINIMUM", OperatorType::kMinimum);
CheckSimpleOperator<TensorFlowLessOperator>("LESS", OperatorType::kLess);
CheckSimpleOperator<NegOperator>("NEG", OperatorType::kNeg);
CheckSimpleOperator<SelectOperator>("SELECT", OperatorType::kSelect);
CheckSimpleOperator<SliceOperator>("SLICE", OperatorType::kSlice);
CheckSimpleOperator<SinOperator>("SIN", OperatorType::kSin);
CheckSimpleOperator<TensorFlowEqualOperator>("EQUAL", OperatorType::kEqual);
CheckSimpleOperator<TensorFlowNotEqualOperator>("NOT_EQUAL",
OperatorType::kNotEqual);
CheckSimpleOperator<LogOperator>("LOG", OperatorType::kLog);
CheckSimpleOperator<TensorFlowSqrtOperator>("SQRT", OperatorType::kSqrt);
CheckSimpleOperator<TensorFlowRsqrtOperator>("RSQRT", OperatorType::kRsqrt);
CheckSimpleOperator<PowOperator>("POW", OperatorType::kPow);
CheckSimpleOperator<LogicalOrOperator>("LOGICAL_OR",
OperatorType::kLogicalOr);
CheckSimpleOperator<LogicalAndOperator>("LOGICAL_AND",
OperatorType::kLogicalAnd);
CheckSimpleOperator<LogicalNotOperator>("LOGICAL_NOT",
OperatorType::kLogicalNot);
CheckSimpleOperator<FloorDivOperator>("FLOOR_DIV", OperatorType::kFloorDiv);
CheckSimpleOperator<TensorFlowSquareOperator>("SQUARE",
OperatorType::kSquare);
CheckSimpleOperator<TensorFlowZerosLikeOperator>("ZEROS_LIKE",
OperatorType::kZerosLike);
CheckSimpleOperator<FloorModOperator>("FLOOR_MOD", OperatorType::kFloorMod);
CheckSimpleOperator<RangeOperator>("RANGE", OperatorType::kRange);
CheckSimpleOperator<FillOperator>("FILL", OperatorType::kFill);
CheckSimpleOperator<ReverseV2Operator>("REVERSE_V2",
OperatorType::kReverseV2);
CheckSimpleOperator<TensorFlowRankOperator>("RANK", OperatorType::kRank);
}
TEST_F(OperatorTest, BuiltinAdd) {
AddOperator op;
op.fused_activation_function = FusedActivationFunctionType::kRelu6;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("ADD", OperatorType::kAdd), op);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
}
TEST_F(OperatorTest, BuiltinAddN) {
AddNOperator op;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("ADD_N", OperatorType::kAddN), op);
ASSERT_NE(output_toco_op.get(), nullptr);
}
TEST_F(OperatorTest, BuiltinReducerOps) {
CheckReducerOperator<MeanOperator>("MEAN", OperatorType::kMean);
CheckReducerOperator<TensorFlowSumOperator>("SUM", OperatorType::kSum);
CheckReducerOperator<TensorFlowProdOperator>("REDUCE_PROD",
OperatorType::kReduceProd);
CheckReducerOperator<TensorFlowMaxOperator>("REDUCE_MAX",
OperatorType::kReduceMax);
CheckReducerOperator<TensorFlowMinOperator>("REDUCE_MIN",
OperatorType::kReduceMin);
CheckReducerOperator<TensorFlowAnyOperator>("REDUCE_ANY", OperatorType::kAny);
}
TEST_F(OperatorTest, BuiltinCast) {
CastOperator op;
op.src_data_type = ArrayDataType::kFloat;
op.dst_data_type = ArrayDataType::kUint8;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("CAST", OperatorType::kCast), op);
EXPECT_EQ(op.src_data_type, output_toco_op->src_data_type);
EXPECT_EQ(op.dst_data_type, output_toco_op->dst_data_type);
}
TEST_F(OperatorTest, CustomConcatenation) {
ConcatenationOperator op;
op.axis = 123;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("CONCATENATION", OperatorType::kConcatenation), op);
EXPECT_EQ(op.axis, output_toco_op->axis);
}
TEST_F(OperatorTest, CustomDepthToSpace) {
DepthToSpaceOperator op;
op.block_size = 123;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("DEPTH_TO_SPACE", OperatorType::kDepthToSpace), op);
EXPECT_EQ(op.block_size, output_toco_op->block_size);
}
TEST_F(OperatorTest, CustomFakeQuant) {
FakeQuantOperator op;
auto* minmax = new MinMax;
minmax->min = -10;
minmax->max = 200;
op.minmax.reset(minmax);
op.num_bits = 16;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("FAKE_QUANT", OperatorType::kFakeQuant), op);
EXPECT_EQ(op.minmax->min, output_toco_op->minmax->min);
EXPECT_EQ(op.minmax->max, output_toco_op->minmax->max);
EXPECT_EQ(op.num_bits, output_toco_op->num_bits);
}
TEST_F(OperatorTest, CustomFullyConnected) {
FullyConnectedOperator op;
op.fused_activation_function = FusedActivationFunctionType::kRelu6;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("FULLY_CONNECTED", OperatorType::kFullyConnected), op);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
}
TEST_F(OperatorTest, BuiltinGather) {
GatherOperator op;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("GATHER", OperatorType::kGather), op);
ASSERT_NE(nullptr, output_toco_op.get());
}
TEST_F(OperatorTest, BuiltinGatherNd) {
GatherNdOperator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("GATHER_ND", OperatorType::kGatherNd), op);
ASSERT_NE(output_toco_op.get(), nullptr);
}
TEST_F(OperatorTest, BuiltinWhere) {
WhereOperator op;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("WHERE", OperatorType::kWhere), op);
ASSERT_NE(output_toco_op.get(), nullptr);
}
TEST_F(OperatorTest, BuiltinL2Pool) {
L2PoolOperator op;
op.stride_width = 123;
op.stride_height = 124;
op.padding.type = PaddingType::kValid;
op.kwidth = 480;
op.kheight = 1080;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("L2_POOL_2D", OperatorType::kL2Pool), op);
EXPECT_EQ(op.stride_width, output_toco_op->stride_width);
EXPECT_EQ(op.stride_height, output_toco_op->stride_height);
EXPECT_EQ(op.padding.type, output_toco_op->padding.type);
EXPECT_EQ(op.kwidth, output_toco_op->kwidth);
EXPECT_EQ(op.kheight, output_toco_op->kheight);
}
TEST_F(OperatorTest, BuiltinLocalResponseNormalization) {
LocalResponseNormalizationOperator op;
op.range = 123;
op.bias = 1.23;
op.alpha = 12.3;
op.beta = .123;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("LOCAL_RESPONSE_NORMALIZATION",
OperatorType::kLocalResponseNormalization),
op);
EXPECT_EQ(op.range, output_toco_op->range);
EXPECT_EQ(op.bias, output_toco_op->bias);
EXPECT_EQ(op.alpha, output_toco_op->alpha);
EXPECT_EQ(op.beta, output_toco_op->beta);
}
TEST_F(OperatorTest, BuiltinMaxPool) {
MaxPoolOperator op;
op.stride_width = 123;
op.stride_height = 124;
op.padding.type = PaddingType::kValid;
op.kwidth = 480;
op.kheight = 1080;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("MAX_POOL_2D", OperatorType::kMaxPool), op);
EXPECT_EQ(op.stride_width, output_toco_op->stride_width);
EXPECT_EQ(op.stride_height, output_toco_op->stride_height);
EXPECT_EQ(op.padding.type, output_toco_op->padding.type);
EXPECT_EQ(op.kwidth, output_toco_op->kwidth);
EXPECT_EQ(op.kheight, output_toco_op->kheight);
}
TEST_F(OperatorTest, BuiltinReshape) {
TensorFlowReshapeOperator op;
op.shape = {1, 2, 4, 5, 8};
auto output_toco_op = SerializeAndDeserialize(
GetOperator("RESHAPE", OperatorType::kReshape), op);
EXPECT_EQ(op.shape, output_toco_op->shape);
}
TEST_F(OperatorTest, CustomSoftmax) {
SoftmaxOperator op;
op.beta = 123.1;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("SOFTMAX", OperatorType::kSoftmax), op);
EXPECT_EQ(op.beta, output_toco_op->beta);
}
TEST_F(OperatorTest, BuiltinSpaceToDepth) {
SpaceToDepthOperator op;
op.block_size = 123;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("SPACE_TO_DEPTH", OperatorType::kSpaceToDepth), op);
EXPECT_EQ(op.block_size, output_toco_op->block_size);
}
TEST_F(OperatorTest, CustomSplit) {
TensorFlowSplitOperator op;
op.num_split = 123;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("SPLIT", OperatorType::kSplit), op);
EXPECT_EQ(op.num_split, output_toco_op->num_split);
}
TEST_F(OperatorTest, CustomSplitV) {
TensorFlowSplitVOperator op;
op.num_split = 123;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("SPLIT_V", OperatorType::kSplitV), op);
EXPECT_EQ(op.num_split, output_toco_op->num_split);
}
TEST_F(OperatorTest, BuiltinAveragePool) {
AveragePoolOperator op;
op.fused_activation_function = FusedActivationFunctionType::kRelu6;
op.stride_width = 123;
op.stride_height = 124;
op.padding.type = PaddingType::kValid;
op.kwidth = 480;
op.kheight = 1080;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("AVERAGE_POOL_2D", OperatorType::kAveragePool), op);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
EXPECT_EQ(op.stride_width, output_toco_op->stride_width);
EXPECT_EQ(op.stride_height, output_toco_op->stride_height);
EXPECT_EQ(op.padding.type, output_toco_op->padding.type);
EXPECT_EQ(op.kwidth, output_toco_op->kwidth);
EXPECT_EQ(op.kheight, output_toco_op->kheight);
}
TEST_F(OperatorTest, BuiltinConvolution) {
ConvOperator op;
op.stride_width = 123;
op.stride_height = 124;
op.padding.type = PaddingType::kValid;
op.fused_activation_function = FusedActivationFunctionType::kRelu6;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("CONV_2D", OperatorType::kConv), op);
EXPECT_EQ(op.stride_width, output_toco_op->stride_width);
EXPECT_EQ(op.stride_height, output_toco_op->stride_height);
EXPECT_EQ(op.padding.type, output_toco_op->padding.type);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
}
TEST_F(OperatorTest, BuiltinDepthwiseConvolution) {
DepthwiseConvOperator op;
op.stride_width = 123;
op.stride_height = 124;
op.padding.type = PaddingType::kValid;
op.depth_multiplier = 6;
op.fused_activation_function = FusedActivationFunctionType::kRelu6;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("DEPTHWISE_CONV_2D", OperatorType::kDepthwiseConv), op);
EXPECT_EQ(op.stride_width, output_toco_op->stride_width);
EXPECT_EQ(op.stride_height, output_toco_op->stride_height);
EXPECT_EQ(op.padding.type, output_toco_op->padding.type);
EXPECT_EQ(op.depth_multiplier, output_toco_op->depth_multiplier);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
}
TEST_F(OperatorTest, BuiltinL2Norm) {
L2NormalizationOperator op;
op.fused_activation_function = FusedActivationFunctionType::kRelu6;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("L2_NORMALIZATION", OperatorType::kL2Normalization), op);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
}
TEST_F(OperatorTest, BuiltinMul) {
MulOperator op;
op.fused_activation_function = FusedActivationFunctionType::kRelu6;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("MUL", OperatorType::kMul), op);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
}
TEST_F(OperatorTest, ResizeBilinear) {
ResizeBilinearOperator op;
op.align_corners = true;
op.half_pixel_centers = false;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("RESIZE_BILINEAR", OperatorType::kResizeBilinear), op);
EXPECT_EQ(op.align_corners, output_toco_op->align_corners);
EXPECT_EQ(op.half_pixel_centers, output_toco_op->half_pixel_centers);
}
TEST_F(OperatorTest, ResizeBilinear_HalfPixelCenters) {
ResizeBilinearOperator op;
op.align_corners = true;
op.half_pixel_centers = true;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("RESIZE_BILINEAR", OperatorType::kResizeBilinear), op);
EXPECT_EQ(op.align_corners, output_toco_op->align_corners);
EXPECT_EQ(op.half_pixel_centers, output_toco_op->half_pixel_centers);
}
TEST_F(OperatorTest, ResizeNearestNeighbor) {
ResizeNearestNeighborOperator op;
op.align_corners = true;
op.half_pixel_centers = false;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("RESIZE_NEAREST_NEIGHBOR",
OperatorType::kResizeNearestNeighbor),
op);
EXPECT_EQ(op.align_corners, output_toco_op->align_corners);
EXPECT_EQ(op.half_pixel_centers, output_toco_op->half_pixel_centers);
}
TEST_F(OperatorTest, ResizeNearestNeighbor_HalfPixelCenters) {
ResizeNearestNeighborOperator op;
op.align_corners = true;
op.half_pixel_centers = true;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("RESIZE_NEAREST_NEIGHBOR",
OperatorType::kResizeNearestNeighbor),
op);
EXPECT_EQ(op.align_corners, output_toco_op->align_corners);
EXPECT_EQ(op.half_pixel_centers, output_toco_op->half_pixel_centers);
}
TEST_F(OperatorTest, Svdf) {
SvdfOperator op;
op.fused_activation_function = FusedActivationFunctionType::kRelu;
op.rank = 1;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("SVDF", OperatorType::kSvdf), op);
EXPECT_EQ(op.fused_activation_function,
output_toco_op->fused_activation_function);
EXPECT_EQ(op.rank, output_toco_op->rank);
}
TEST_F(OperatorTest, Squeeze) {
SqueezeOperator op;
op.squeeze_dims = {-2, -3, 4, 1, 4};
auto output_toco_op = SerializeAndDeserialize(
GetOperator("SQUEEZE", OperatorType::kSqueeze), op);
EXPECT_EQ(op.squeeze_dims, output_toco_op->squeeze_dims);
}
TEST_F(OperatorTest, StridedSlice) {
StridedSliceOperator op;
op.begin_mask = 1;
op.end_mask = 2;
op.ellipsis_mask = 1;
op.new_axis_mask = 1;
op.shrink_axis_mask = 2;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("STRIDED_SLICE", OperatorType::kStridedSlice), op);
EXPECT_EQ(op.start_indices, output_toco_op->start_indices);
EXPECT_EQ(op.stop_indices, output_toco_op->stop_indices);
EXPECT_EQ(op.strides, output_toco_op->strides);
EXPECT_EQ(op.begin_mask, output_toco_op->begin_mask);
EXPECT_EQ(op.end_mask, output_toco_op->end_mask);
EXPECT_EQ(op.ellipsis_mask, output_toco_op->ellipsis_mask);
EXPECT_EQ(op.new_axis_mask, output_toco_op->new_axis_mask);
EXPECT_EQ(op.shrink_axis_mask, output_toco_op->shrink_axis_mask);
}
TEST_F(OperatorTest, BuiltinTopKV2) {
TopKV2Operator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("TOPK_V2", OperatorType::kTopK_V2), op);
ASSERT_NE(nullptr, output_toco_op.get());
}
TEST_F(OperatorTest, BuiltinArgMax) {
ArgMaxOperator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("ARG_MAX", OperatorType::kArgMax), op);
EXPECT_EQ(op.output_data_type, output_toco_op->output_data_type);
}
TEST_F(OperatorTest, BuiltinArgMin) {
ArgMinOperator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("ARG_MIN", OperatorType::kArgMin), op);
EXPECT_EQ(op.output_data_type, output_toco_op->output_data_type);
}
TEST_F(OperatorTest, BuiltinDequantize) {
DequantizeOperator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("DEQUANTIZE", OperatorType::kDequantize), op);
}
TEST_F(OperatorTest, BuiltinTransposeConv) {
TransposeConvOperator op;
op.stride_width = 123;
op.stride_height = 124;
op.padding.type = PaddingType::kValid;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("TRANSPOSE_CONV", OperatorType::kTransposeConv), op);
EXPECT_EQ(op.stride_width, output_toco_op->stride_width);
EXPECT_EQ(op.stride_height, output_toco_op->stride_height);
EXPECT_EQ(op.padding.type, output_toco_op->padding.type);
}
TEST_F(OperatorTest, BuiltinShape) {
TensorFlowShapeOperator op;
op.output_data_type = ArrayDataType::kInt64;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("SHAPE", OperatorType::kShape), op);
EXPECT_EQ(op.output_data_type, output_toco_op->output_data_type);
}
TEST_F(OperatorTest, BuiltinSparseToDense) {
SparseToDenseOperator op;
op.validate_indices = false;
std::unique_ptr<toco::SparseToDenseOperator> output_toco_op =
SerializeAndDeserialize(
GetOperator("SPARSE_TO_DENSE", OperatorType::kSparseToDense), op);
EXPECT_EQ(op.validate_indices, output_toco_op->validate_indices);
}
TEST_F(OperatorTest, VersioningSparseToDense) {
SparseToDenseOperator op;
op.inputs = {"indices", "output_shape", "input_values", "default_value"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model int32_model;
Array& int32_array = int32_model.GetOrCreateArray(op.inputs[2]);
int32_array.data_type = ArrayDataType::kInt32;
OperatorSignature int32_signature = {.op = &op, .model = &int32_model};
EXPECT_EQ(base_op->GetVersion(int32_signature), 1);
Model int64_model;
Array& int64_array = int64_model.GetOrCreateArray(op.inputs[2]);
int64_array.data_type = ArrayDataType::kInt64;
OperatorSignature int64_signature = {.op = &op, .model = &int64_model};
EXPECT_EQ(base_op->GetVersion(int64_signature), 2);
Model int8_model;
Array& int8_array = int8_model.GetOrCreateArray(op.inputs[2]);
int8_array.data_type = ArrayDataType::kInt8;
OperatorSignature int8_signature = {.op = &op, .model = &int8_model};
EXPECT_EQ(base_op->GetVersion(int8_signature), 3);
Model uint8_model;
Array& uint8_array = uint8_model.GetOrCreateArray(op.inputs[2]);
uint8_array.data_type = ArrayDataType::kUint8;
OperatorSignature uint8_signature = {.op = &op, .model = &uint8_model};
EXPECT_EQ(base_op->GetVersion(uint8_signature), 3);
}
TEST_F(OperatorTest, BuiltinPack) {
PackOperator op;
op.values_count = 3;
op.axis = 1;
std::unique_ptr<toco::PackOperator> output_toco_op =
SerializeAndDeserialize(GetOperator("PACK", OperatorType::kPack), op);
EXPECT_EQ(op.values_count, output_toco_op->values_count);
EXPECT_EQ(op.axis, output_toco_op->axis);
}
TEST_F(OperatorTest, BuiltinOneHot) {
OneHotOperator op;
op.axis = 2;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("ONE_HOT", OperatorType::kOneHot), op);
EXPECT_EQ(op.axis, output_toco_op->axis);
}
TEST_F(OperatorTest, BuiltinUnpack) {
UnpackOperator op;
op.num = 5;
op.axis = 2;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("UNPACK", OperatorType::kUnpack), op);
EXPECT_EQ(op.num, output_toco_op->num);
EXPECT_EQ(op.axis, output_toco_op->axis);
}
TEST_F(OperatorTest, BuiltinLeakyRelu) {
LeakyReluOperator op;
op.alpha = 3;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("LEAKY_RELU", OperatorType::kLeakyRelu), op);
EXPECT_EQ(op.alpha, output_toco_op->alpha);
}
TEST_F(OperatorTest, BuiltinSquaredDifference) {
SquaredDifferenceOperator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("SQUARED_DIFFERENCE", OperatorType::kSquaredDifference), op);
ASSERT_NE(nullptr, output_toco_op.get());
}
TEST_F(OperatorTest, BuiltinScatterNd) {
ScatterNdOperator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("SCATTER_ND", OperatorType::kScatterNd), op);
ASSERT_NE(nullptr, output_toco_op.get());
}
TEST_F(OperatorTest, CustomCTCBeamSearchDecoder) {
CTCBeamSearchDecoderOperator op;
op.beam_width = 3;
op.top_paths = 2;
op.merge_repeated = false;
std::unique_ptr<toco::CTCBeamSearchDecoderOperator> output_toco_op =
SerializeAndDeserialize(GetOperator("CTC_BEAM_SEARCH_DECODER",
OperatorType::kCTCBeamSearchDecoder),
op);
EXPECT_EQ(op.beam_width, output_toco_op->beam_width);
EXPECT_EQ(op.top_paths, output_toco_op->top_paths);
EXPECT_EQ(op.merge_repeated, output_toco_op->merge_repeated);
}
TEST_F(OperatorTest, TensorFlowUnsupported) {
TensorFlowUnsupportedOperator op;
op.tensorflow_op = "MyCustomUnsupportedOp";
::tensorflow::NodeDef node_def;
auto attr = node_def.mutable_attr();
(*attr)["float_attr"].set_f(2.0);
(*attr)["str_attr"].set_s("Hello World");
(*attr)["int_attr"].set_i(17);
(*attr)["bool_attr"].set_b(true);
{
auto* list = (*attr)["list_string_attr"].mutable_list();
list->add_s("abcde");
list->add_s("1234");
list->add_s("");
list->add_s("zyxwv");
list->add_s("!-.");
}
{
auto* list = (*attr)["list_float_attr"].mutable_list();
list->add_f(std::numeric_limits<float>::min());
list->add_f(2.0);
list->add_f(-std::numeric_limits<float>::max());
}
{
auto* list = (*attr)["list_int_attr"].mutable_list();
list->add_i(1);
list->add_i(20);
list->add_i(1LL << 40);
list->add_i(-(1LL << 40));
}
node_def.SerializeToString(&op.tensorflow_node_def);
auto output_toco_op = SerializeAndDeserialize(
GetOperator("TENSORFLOW_UNSUPPORTED", OperatorType::kUnsupported), op);
::tensorflow::NodeDef output_node_def;
output_node_def.ParseFromString(output_toco_op->tensorflow_node_def);
const auto& output_attr = output_node_def.attr();
EXPECT_EQ(2.0, output_attr.at("float_attr").f());
EXPECT_EQ("Hello World", output_attr.at("str_attr").s());
EXPECT_EQ(17, output_attr.at("int_attr").i());
EXPECT_EQ(true, output_attr.at("bool_attr").b());
{
const auto& list = output_attr.at("list_string_attr").list();
ASSERT_EQ(5, list.s_size());
EXPECT_EQ("abcde", list.s(0));
EXPECT_EQ("1234", list.s(1));
EXPECT_EQ("", list.s(2));
EXPECT_EQ("zyxwv", list.s(3));
EXPECT_EQ("!-.", list.s(4));
}
{
const auto& list = output_attr.at("list_float_attr").list();
ASSERT_EQ(3, list.f_size());
EXPECT_EQ(std::numeric_limits<float>::min(), list.f(0));
EXPECT_EQ(2.0, list.f(1));
EXPECT_EQ(-std::numeric_limits<float>::max(), list.f(2));
}
{
const auto& list = output_attr.at("list_int_attr").list();
ASSERT_EQ(4, list.i_size());
EXPECT_EQ(1, list.i(0));
EXPECT_EQ(20, list.i(1));
EXPECT_EQ(1LL << 40, list.i(2));
EXPECT_EQ(-(1LL << 40), list.i(3));
}
}
TEST_F(OperatorTest, TensorFlowUnsupportedWithoutAttr) {
TensorFlowUnsupportedOperator op;
op.tensorflow_op = "MyCustomUnsupportedOp";
auto output_toco_op = SerializeAndDeserialize(
GetOperator("TENSORFLOW_UNSUPPORTED", OperatorType::kUnsupported), op);
::tensorflow::NodeDef output_node_def;
output_node_def.ParseFromString(output_toco_op->tensorflow_node_def);
EXPECT_TRUE(output_node_def.attr().empty());
}
TEST_F(OperatorTest, TestShouldExportAsFlexOp) {
EXPECT_FALSE(ShouldExportAsFlexOp(false, "Conv2D"));
EXPECT_TRUE(ShouldExportAsFlexOp(true, "Conv2D"));
EXPECT_TRUE(ShouldExportAsFlexOp(true, "EluGrad"));
EXPECT_TRUE(ShouldExportAsFlexOp(true, "RFFT"));
EXPECT_FALSE(ShouldExportAsFlexOp(true, "MyAwesomeCustomOp"));
EXPECT_TRUE(ShouldExportAsFlexOp(true, "RandomShuffle"));
}
TEST_F(OperatorTest, BuiltinMirrorPad) {
MirrorPadOperator op;
op.mode = MirrorPadMode::kReflect;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("MIRROR_PAD", OperatorType::kMirrorPad), op);
EXPECT_EQ(op.mode, output_toco_op->mode);
}
TEST_F(OperatorTest, BuiltinUnique) {
UniqueOperator op;
op.idx_out_type = ArrayDataType::kInt64;
auto output_toco_op =
SerializeAndDeserialize(GetOperator("UNIQUE", OperatorType::kUnique), op);
ASSERT_NE(nullptr, output_toco_op.get());
EXPECT_EQ(output_toco_op->idx_out_type, op.idx_out_type);
}
TEST_F(OperatorTest, BuiltinSegmentSum) {
SegmentSumOperator op;
auto output_toco_op = SerializeAndDeserialize(
GetOperator("SEGMENT_SUM", OperatorType::kSegmentSum), op);
ASSERT_NE(nullptr, output_toco_op.get());
}
TEST_F(OperatorTest, BuiltinReverseSequence) {
ReverseSequenceOperator op;
op.seq_dim = 3;
op.batch_dim = 1;
std::unique_ptr<toco::ReverseSequenceOperator> output_toco_op =
SerializeAndDeserialize(
GetOperator("REVERSE_SEQUENCE", OperatorType::kReverseSequence), op);
EXPECT_EQ(op.seq_dim, output_toco_op->seq_dim);
EXPECT_EQ(op.batch_dim, output_toco_op->batch_dim);
}
TEST_F(OperatorTest, BuiltinMatrixDiag) {
MatrixDiagOperator op;
std::unique_ptr<toco::MatrixDiagOperator> output_toco_op =
SerializeAndDeserialize(
GetOperator("MATRIX_DIAG", OperatorType::kMatrixDiag), op);
}
TEST_F(OperatorTest, BuiltinMatrixSetDiag) {
MatrixSetDiagOperator op;
std::unique_ptr<toco::MatrixSetDiagOperator> output_toco_op =
SerializeAndDeserialize(
GetOperator("MATRIX_SET_DIAG", OperatorType::kMatrixSetDiag), op);
}
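// Verifies that an operator whose version depends on its input type reports
// version 1 for uint8 inputs and version 2 for int8 inputs.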
template <typename Op>
void SimpleVersioningTest() {
Op op;
op.inputs = {"input1"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model uint8_model;
Array& uint8_array = uint8_model.GetOrCreateArray(op.inputs[0]);
uint8_array.data_type = ArrayDataType::kUint8;
OperatorSignature uint8_signature = {.op = &op, .model = &uint8_model};
EXPECT_EQ(base_op->GetVersion(uint8_signature), 1);
Model int8_model;
Array& int8_array = int8_model.GetOrCreateArray(op.inputs[0]);
int8_array.data_type = ArrayDataType::kInt8;
OperatorSignature int8_signature = {.op = &op, .model = &int8_model};
EXPECT_EQ(base_op->GetVersion(int8_signature), 2);
}
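// Like SimpleVersioningTest, but the data type that determines the version is
// taken from the operator's output rather than its input.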
template <typename Op>
void SimpleOutputVersioningTest() {
Op op;
op.outputs = {"output1"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model uint8_model;
Array& uint8_array = uint8_model.GetOrCreateArray(op.outputs[0]);
uint8_array.data_type = ArrayDataType::kUint8;
OperatorSignature uint8_signature = {.op = &op, .model = &uint8_model};
EXPECT_EQ(base_op->GetVersion(uint8_signature), 1);
Model int8_model;
Array& int8_array = int8_model.GetOrCreateArray(op.outputs[0]);
int8_array.data_type = ArrayDataType::kInt8;
OperatorSignature int8_signature = {.op = &op, .model = &int8_model};
EXPECT_EQ(base_op->GetVersion(int8_signature), 2);
}
TEST_F(OperatorTest, VersioningEqualTest) {
SimpleVersioningTest<TensorFlowEqualOperator>();
}
TEST_F(OperatorTest, VersioningNotEqualTest) {
SimpleVersioningTest<TensorFlowNotEqualOperator>();
}
TEST_F(OperatorTest, VersioningLessTest) {
SimpleVersioningTest<TensorFlowLessOperator>();
}
TEST_F(OperatorTest, VersioningLessEqualTest) {
SimpleVersioningTest<TensorFlowLessEqualOperator>();
}
TEST_F(OperatorTest, VersioningGreaterTest) {
SimpleVersioningTest<TensorFlowGreaterOperator>();
}
TEST_F(OperatorTest, VersioningGreaterEqualTest) {
SimpleVersioningTest<TensorFlowGreaterEqualOperator>();
}
TEST_F(OperatorTest, VersioningSpaceToBatchNDTest) {
SpaceToBatchNDOperator op;
op.inputs = {"input1"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model uint8_model;
Array& uint8_array = uint8_model.GetOrCreateArray(op.inputs[0]);
uint8_array.copy_shape({1, 2, 2, 2});
uint8_array.data_type = ArrayDataType::kUint8;
OperatorSignature uint8_signature = {.op = &op, .model = &uint8_model};
EXPECT_EQ(base_op->GetVersion(uint8_signature), 1);
Model int8_model;
Array& int8_array = int8_model.GetOrCreateArray(op.inputs[0]);
int8_array.copy_shape({1, 2, 2, 2});
int8_array.data_type = ArrayDataType::kInt8;
OperatorSignature int8_signature = {.op = &op, .model = &int8_model};
EXPECT_EQ(base_op->GetVersion(int8_signature), 2);
Model float_model;
Array& float_array = float_model.GetOrCreateArray(op.inputs[0]);
float_array.copy_shape({1, 2, 2});
float_array.data_type = ArrayDataType::kFloat;
OperatorSignature float_signature = {.op = &op, .model = &float_model};
EXPECT_EQ(base_op->GetVersion(float_signature), 3);
}
TEST_F(OperatorTest, VersioningLogSoftmaxTest) {
SimpleVersioningTest<LogSoftmaxOperator>();
}
TEST_F(OperatorTest, VersioningPackTest) {
SimpleVersioningTest<PackOperator>();
}
TEST_F(OperatorTest, VersioningUnpackTest) {
UnpackOperator op;
op.inputs = {"input1"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model int32_model;
Array& int32_array = int32_model.GetOrCreateArray(op.inputs[0]);
int32_array.data_type = ArrayDataType::kInt32;
OperatorSignature int32_signature = {.op = &op, .model = &int32_model};
EXPECT_EQ(base_op->GetVersion(int32_signature), 1);
Model uint8_model;
Array& uint8_array = uint8_model.GetOrCreateArray(op.inputs[0]);
uint8_array.data_type = ArrayDataType::kUint8;
OperatorSignature uint8_signature = {.op = &op, .model = &uint8_model};
EXPECT_EQ(base_op->GetVersion(uint8_signature), 2);
Model int8_model;
Array& int8_array = int8_model.GetOrCreateArray(op.inputs[0]);
int8_array.data_type = ArrayDataType::kInt8;
OperatorSignature int8_signature = {.op = &op, .model = &int8_model};
EXPECT_EQ(base_op->GetVersion(int8_signature), 2);
}
TEST_F(OperatorTest, VersioningBatchToSpaceNDTest) {
BatchToSpaceNDOperator op;
op.inputs = {"input1"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model uint8_model;
Array& uint8_array = uint8_model.GetOrCreateArray(op.inputs[0]);
uint8_array.data_type = ArrayDataType::kUint8;
uint8_array.copy_shape({1, 2, 2, 2});
OperatorSignature uint8_signature = {.op = &op, .model = &uint8_model};
EXPECT_EQ(base_op->GetVersion(uint8_signature), 1);
Model int8_model;
Array& int8_array = int8_model.GetOrCreateArray(op.inputs[0]);
int8_array.data_type = ArrayDataType::kInt8;
int8_array.copy_shape({1, 2, 2, 2});
OperatorSignature int8_signature = {.op = &op, .model = &int8_model};
EXPECT_EQ(base_op->GetVersion(int8_signature), 2);
Model float_model;
Array& float_array = float_model.GetOrCreateArray(op.inputs[0]);
float_array.copy_shape({1, 2, 2});
float_array.data_type = ArrayDataType::kFloat;
OperatorSignature float_signature = {.op = &op, .model = &float_model};
EXPECT_EQ(base_op->GetVersion(float_signature), 3);
}
TEST_F(OperatorTest, VersioningTanhTest) {
SimpleVersioningTest<TanhOperator>();
}
TEST_F(OperatorTest, VersioningStridedSliceTest) {
StridedSliceOperator op;
op.inputs = {"input1"};
op.ellipsis_mask = 0;
op.new_axis_mask = 0;
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model uint8_model;
Array& uint8_array = uint8_model.GetOrCreateArray(op.inputs[0]);
uint8_array.data_type = ArrayDataType::kUint8;
OperatorSignature uint8_signature = {.op = &op, .model = &uint8_model};
EXPECT_EQ(base_op->GetVersion(uint8_signature), 1);
Model int8_model;
Array& int8_array = int8_model.GetOrCreateArray(op.inputs[0]);
int8_array.data_type = ArrayDataType::kInt8;
OperatorSignature int8_signature = {.op = &op, .model = &int8_model};
EXPECT_EQ(base_op->GetVersion(int8_signature), 2);
Model bool_model;
Array& bool_array = bool_model.GetOrCreateArray(op.inputs[0]);
bool_array.data_type = ArrayDataType::kBool;
OperatorSignature bool_signature = {.op = &op, .model = &bool_model};
EXPECT_EQ(base_op->GetVersion(bool_signature), 3);
op.start_indices = {0, 0, 0, 0, 0};
op.stop_indices = {1, 2, 2, 2, 2};
op.strides = {1, 1, 1, 1, 1};
EXPECT_EQ(base_op->GetVersion(uint8_signature), 4);
EXPECT_EQ(base_op->GetVersion(int8_signature), 4);
EXPECT_EQ(base_op->GetVersion(bool_signature), 4);
}
TEST_F(OperatorTest, VersioningSpaceToDepthTest) {
SimpleVersioningTest<SpaceToDepthOperator>();
}
TEST_F(OperatorTest, VersioningSliceTest) {
SimpleVersioningTest<SliceOperator>();
SliceOperator op;
op.inputs = {"input1"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model string_model;
Array& string_array = string_model.GetOrCreateArray(op.inputs[0]);
string_array.data_type = ArrayDataType::kString;
OperatorSignature string_signature = {.op = &op, .model = &string_model};
EXPECT_EQ(base_op->GetVersion(string_signature), 3);
}
TEST_F(OperatorTest, VersioningLogisticTest) {
SimpleVersioningTest<LogisticOperator>();
}
TEST_F(OperatorTest, VersioningL2NormTest) {
SimpleOutputVersioningTest<L2NormalizationOperator>();
}
TEST_F(OperatorTest, VersioningMaxTest) {
SimpleVersioningTest<TensorFlowMaximumOperator>();
}
TEST_F(OperatorTest, VersioningMinTest) {
SimpleVersioningTest<TensorFlowMinimumOperator>();
}
TEST_F(OperatorTest, VersioningMeanTest) {
SimpleVersioningTest<MeanOperator>();
}
TEST_F(OperatorTest, VersioningSumTest) {
SimpleVersioningTest<TensorFlowSumOperator>();
}
TEST_F(OperatorTest, VersioningAddTest) { SimpleVersioningTest<AddOperator>(); }
void SimpleMulVersioningTest(ArrayDataType data_type, float multiplier,
int version) {
MulOperator op;
op.inputs = {"input1", "input2"};
op.outputs = {"output"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model model;
Array& input0 = model.GetOrCreateArray(op.inputs[0]);
Array& input1 = model.GetOrCreateArray(op.inputs[1]);
Array& output = model.GetOrCreateArray(op.outputs[0]);
input0.data_type = data_type;
input0.GetOrCreateQuantizationParams().scale = 1.0f;
input1.data_type = data_type;
input1.GetOrCreateQuantizationParams().scale = 1.0f;
output.data_type = data_type;
output.GetOrCreateQuantizationParams().scale = 1.0f / multiplier;
OperatorSignature signature = {.op = &op, .model = &model};
EXPECT_EQ(base_op->GetVersion(signature), version);
}
TEST_F(OperatorTest, VersioningMulTest) {
SimpleMulVersioningTest(ArrayDataType::kUint8, 0.5f, 1);
SimpleMulVersioningTest(ArrayDataType::kInt8, 0.5f, 2);
SimpleMulVersioningTest(ArrayDataType::kInt8, 2.0f, 3);
}
template <typename OpType>
void SimpleTwoInputsVersioningTest(ArrayDataType data_type, Shape shape1,
Shape shape2, int version) {
OpType op;
op.inputs = {"input1", "input2"};
op.outputs = {"output"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model model;
Array& input0 = model.GetOrCreateArray(op.inputs[0]);
Array& input1 = model.GetOrCreateArray(op.inputs[1]);
Array& output = model.GetOrCreateArray(op.outputs[0]);
input0.data_type = data_type;
input0.copy_shape(shape1);
input1.data_type = data_type;
input1.copy_shape(shape2);
output.data_type = data_type;
OperatorSignature signature = {.op = &op, .model = &model};
EXPECT_EQ(base_op->GetVersion(signature), version);
}
template <typename OpType>
void SimpleThreeInputsVersioningTest(ArrayDataType data_type, Shape shape1,
Shape shape2, Shape shape3, int version) {
OpType op;
op.inputs = {"input1", "input2", "input3"};
op.outputs = {"output"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* base_op = operator_by_type_map.at(op.type).get();
Model model;
Array& input0 = model.GetOrCreateArray(op.inputs[0]);
Array& input1 = model.GetOrCreateArray(op.inputs[1]);
Array& input2 = model.GetOrCreateArray(op.inputs[2]);
Array& output = model.GetOrCreateArray(op.outputs[0]);
input0.data_type = data_type;
input0.copy_shape(shape1);
input1.data_type = data_type;
input1.copy_shape(shape2);
input2.data_type = data_type;
input2.copy_shape(shape3);
output.data_type = data_type;
OperatorSignature signature = {.op = &op, .model = &model};
EXPECT_EQ(base_op->GetVersion(signature), version);
}
TEST_F(OperatorTest, VersioningSubTest) {
SimpleTwoInputsVersioningTest<SubOperator>(ArrayDataType::kUint8,
{1, 2, 2, 2}, {1, 2, 2, 2}, 1);
SimpleTwoInputsVersioningTest<SubOperator>(ArrayDataType::kInt8, {1, 2, 2, 2},
{1, 2, 2, 2}, 2);
SimpleTwoInputsVersioningTest<SubOperator>(ArrayDataType::kUint8, {1, 2, 2},
{1, 2, 2}, 1);
SimpleTwoInputsVersioningTest<SubOperator>(ArrayDataType::kInt8, {1, 2, 2},
{1, 2, 2}, 2);
SimpleTwoInputsVersioningTest<SubOperator>(ArrayDataType::kUint8,
{1, 2, 2, 2}, {1, 2, 2, 1}, 1);
SimpleTwoInputsVersioningTest<SubOperator>(ArrayDataType::kInt8, {1, 2, 2, 2},
{1, 2, 2, 1}, 2);
SimpleTwoInputsVersioningTest<SubOperator>(
ArrayDataType::kUint8, {1, 2, 2, 2, 2}, {1, 2, 2, 2, 1}, 3);
SimpleTwoInputsVersioningTest<SubOperator>(
ArrayDataType::kInt8, {1, 2, 2, 2, 2}, {1, 2, 2, 2, 1}, 3);
}
TEST_F(OperatorTest, VersioningDivTest) {
SimpleTwoInputsVersioningTest<DivOperator>(ArrayDataType::kUint8,
{1, 2, 2, 2}, {1, 2, 2, 2}, 1);
SimpleTwoInputsVersioningTest<DivOperator>(ArrayDataType::kInt8, {1, 2, 2},
{1, 2, 2}, 1);
SimpleTwoInputsVersioningTest<DivOperator>(ArrayDataType::kUint8,
{1, 2, 2, 2}, {1, 2, 2, 1}, 1);
SimpleTwoInputsVersioningTest<DivOperator>(
ArrayDataType::kInt8, {1, 2, 2, 2, 2}, {1, 2, 2, 2, 1}, 2);
}
TEST_F(OperatorTest, VersioningPadTest) { SimpleVersioningTest<PadOperator>(); }
TEST_F(OperatorTest, VersioningPadV2Test) {
SimpleVersioningTest<PadV2Operator>();
}
TEST_F(OperatorTest, VersioningConcatenationTest) {
SimpleVersioningTest<ConcatenationOperator>();
}
TEST_F(OperatorTest, VersioningSelectTest) {
SimpleThreeInputsVersioningTest<SelectOperator>(
ArrayDataType::kUint8, {1, 2, 2, 2}, {1, 2, 2, 1}, {1, 2, 2, 1}, 1);
SimpleThreeInputsVersioningTest<SelectOperator>(
ArrayDataType::kInt8, {1, 2, 2, 2}, {1, 2, 2, 1}, {1, 2, 2, 1}, 2);
SimpleThreeInputsVersioningTest<SelectOperator>(
ArrayDataType::kInt8, {1, 2, 2, 2, 1}, {1, 2, 2, 1, 1}, {1, 2, 2, 1, 1},
3);
}
TEST_F(OperatorTest, VersioningRelu6Test) {
SimpleVersioningTest<Relu6Operator>();
}
TEST_F(OperatorTest, VersioningFullyConnectedTest) {
FullyConnectedOperator fully_connected_op;
fully_connected_op.inputs = {"input", "weight"};
fully_connected_op.outputs = {"output"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* op =
operator_by_type_map.at(fully_connected_op.type).get();
Model uint8_model;
Array& input_uint8_array =
uint8_model.GetOrCreateArray(fully_connected_op.inputs[0]);
input_uint8_array.data_type = ArrayDataType::kUint8;
Array& weight_uint8_array =
uint8_model.GetOrCreateArray(fully_connected_op.inputs[1]);
weight_uint8_array.data_type = ArrayDataType::kUint8;
Array& output_uint8_array =
uint8_model.GetOrCreateArray(fully_connected_op.outputs[0]);
output_uint8_array.data_type = ArrayDataType::kUint8;
OperatorSignature uint8_signature = {.op = &fully_connected_op,
.model = &uint8_model};
EXPECT_EQ(op->GetVersion(uint8_signature), 6);
Model int8_model;
Array& input_int8_array =
int8_model.GetOrCreateArray(fully_connected_op.inputs[0]);
input_int8_array.data_type = ArrayDataType::kInt8;
Array& weight_int8_array =
int8_model.GetOrCreateArray(fully_connected_op.inputs[1]);
weight_int8_array.data_type = ArrayDataType::kInt8;
Array& output_int8_array =
int8_model.GetOrCreateArray(fully_connected_op.outputs[0]);
output_int8_array.data_type = ArrayDataType::kInt8;
OperatorSignature int8_signature = {.op = &fully_connected_op,
.model = &int8_model};
EXPECT_EQ(op->GetVersion(int8_signature), 6);
}
TEST_F(OperatorTest, VersioningDequantizeTest) {
DequantizeOperator dequant_op;
dequant_op.inputs = {"input"};
dequant_op.outputs = {"output"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* op = operator_by_type_map.at(dequant_op.type).get();
Model int16_model;
Array& input_int16_array = int16_model.GetOrCreateArray(dequant_op.inputs[0]);
input_int16_array.data_type = ArrayDataType::kInt16;
OperatorSignature int16_signature = {.op = &dequant_op,
.model = &int16_model};
EXPECT_EQ(op->GetVersion(int16_signature), 3);
Model float16_model;
Array& input_float16_array =
float16_model.GetOrCreateArray(dequant_op.inputs[0]);
input_float16_array.data_type = ArrayDataType::kFloat16;
OperatorSignature float16_signature = {.op = &dequant_op,
.model = &float16_model};
EXPECT_EQ(op->GetVersion(float16_signature), 3);
Model int8_model;
Array& input_int8_array = int8_model.GetOrCreateArray(dequant_op.inputs[0]);
input_int8_array.data_type = ArrayDataType::kInt8;
OperatorSignature int8_signature = {.op = &dequant_op, .model = &int8_model};
EXPECT_EQ(op->GetVersion(int8_signature), 2);
Model float_model;
Array& input_float_array = float_model.GetOrCreateArray(dequant_op.inputs[0]);
input_float_array.data_type = ArrayDataType::kFloat;
OperatorSignature float_signature = {.op = &dequant_op,
.model = &float_model};
EXPECT_EQ(op->GetVersion(float_signature), 1);
}
TEST_F(OperatorTest, VersioningConv2DTest) {
ConvOperator conv_op;
conv_op.inputs = {"input", "filter"};
conv_op.outputs = {"output"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* op = operator_by_type_map.at(conv_op.type).get();
Model uint8_model;
Array& input_uint8_array = uint8_model.GetOrCreateArray(conv_op.inputs[0]);
input_uint8_array.data_type = ArrayDataType::kUint8;
Array& filter_uint8_array = uint8_model.GetOrCreateArray(conv_op.inputs[1]);
filter_uint8_array.data_type = ArrayDataType::kUint8;
Array& output_uint8_array = uint8_model.GetOrCreateArray(conv_op.outputs[0]);
output_uint8_array.data_type = ArrayDataType::kUint8;
OperatorSignature uint8_signature = {.op = &conv_op, .model = &uint8_model};
EXPECT_EQ(op->GetVersion(uint8_signature), 1);
Model int8_model;
Array& input_int8_array = int8_model.GetOrCreateArray(conv_op.inputs[0]);
input_int8_array.data_type = ArrayDataType::kInt8;
Array& filter_int8_array = int8_model.GetOrCreateArray(conv_op.inputs[1]);
filter_int8_array.data_type = ArrayDataType::kInt8;
Array& output_int8_array = int8_model.GetOrCreateArray(conv_op.outputs[0]);
output_int8_array.data_type = ArrayDataType::kInt8;
OperatorSignature int8_signature = {.op = &conv_op, .model = &int8_model};
EXPECT_EQ(op->GetVersion(int8_signature), 3);
Model float_model;
Array& input_float_array = float_model.GetOrCreateArray(conv_op.inputs[0]);
input_float_array.data_type = ArrayDataType::kFloat;
Array& filter_int8_array1 = float_model.GetOrCreateArray(conv_op.inputs[1]);
filter_int8_array1.data_type = ArrayDataType::kInt8;
Array& output_float_array = float_model.GetOrCreateArray(conv_op.outputs[0]);
output_float_array.data_type = ArrayDataType::kFloat;
OperatorSignature float_signature = {.op = &conv_op, .model = &float_model};
EXPECT_EQ(op->GetVersion(float_signature), 2);
}
TEST_F(OperatorTest, VersioningFloorDivOperatorTest) {
FloorDivOperator floordiv_op;
floordiv_op.inputs = {"input1"};
  auto operator_by_type_map = BuildOperatorByTypeMap(false);
const BaseOperator* op = operator_by_type_map.at(floordiv_op.type).get();
Model int32_model;
Array& input_int32_array =
int32_model.GetOrCreateArray(floordiv_op.inputs[0]);
input_int32_array.data_type = ArrayDataType::kInt32;
OperatorSignature int32_signature = {.op = &floordiv_op,
.model = &int32_model};
EXPECT_EQ(op->GetVersion(int32_signature), 1);
Model float_model;
Array& input_float_array =
float_model.GetOrCreateArray(floordiv_op.inputs[0]);
input_float_array.data_type = ArrayDataType::kFloat;
OperatorSignature float_signature = {.op = &floordiv_op,
.model = &float_model};
EXPECT_EQ(op->GetVersion(float_signature), 2);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/tflite/operator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/tflite/operator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2557a474-7929-4212-8a22-7076742f216a | cpp | google/leveldb | write_batch | db/write_batch.cc | db/write_batch_test.cc | #include "leveldb/write_batch.h"
#include "db/dbformat.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
#include "leveldb/db.h"
#include "util/coding.h"
namespace leveldb {
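// WriteBatch header has an 8-byte sequence number followed by a 4-byte count.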
static const size_t kHeader = 12;
WriteBatch::WriteBatch() { Clear(); }
WriteBatch::~WriteBatch() = default;
WriteBatch::Handler::~Handler() = default;
void WriteBatch::Clear() {
rep_.clear();
rep_.resize(kHeader);
}
size_t WriteBatch::ApproximateSize() const { return rep_.size(); }
Status WriteBatch::Iterate(Handler* handler) const {
Slice input(rep_);
if (input.size() < kHeader) {
return Status::Corruption("malformed WriteBatch (too small)");
}
input.remove_prefix(kHeader);
Slice key, value;
int found = 0;
while (!input.empty()) {
found++;
char tag = input[0];
input.remove_prefix(1);
switch (tag) {
case kTypeValue:
if (GetLengthPrefixedSlice(&input, &key) &&
GetLengthPrefixedSlice(&input, &value)) {
handler->Put(key, value);
} else {
return Status::Corruption("bad WriteBatch Put");
}
break;
case kTypeDeletion:
if (GetLengthPrefixedSlice(&input, &key)) {
handler->Delete(key);
} else {
return Status::Corruption("bad WriteBatch Delete");
}
break;
default:
return Status::Corruption("unknown WriteBatch tag");
}
}
if (found != WriteBatchInternal::Count(this)) {
return Status::Corruption("WriteBatch has wrong count");
} else {
return Status::OK();
}
}
int WriteBatchInternal::Count(const WriteBatch* b) {
return DecodeFixed32(b->rep_.data() + 8);
}
void WriteBatchInternal::SetCount(WriteBatch* b, int n) {
EncodeFixed32(&b->rep_[8], n);
}
SequenceNumber WriteBatchInternal::Sequence(const WriteBatch* b) {
return SequenceNumber(DecodeFixed64(b->rep_.data()));
}
void WriteBatchInternal::SetSequence(WriteBatch* b, SequenceNumber seq) {
EncodeFixed64(&b->rep_[0], seq);
}
void WriteBatch::Put(const Slice& key, const Slice& value) {
WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
rep_.push_back(static_cast<char>(kTypeValue));
PutLengthPrefixedSlice(&rep_, key);
PutLengthPrefixedSlice(&rep_, value);
}
void WriteBatch::Delete(const Slice& key) {
WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
rep_.push_back(static_cast<char>(kTypeDeletion));
PutLengthPrefixedSlice(&rep_, key);
}
void WriteBatch::Append(const WriteBatch& source) {
WriteBatchInternal::Append(this, &source);
}
namespace {
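// Handler that replays the operations recorded in a WriteBatch into a
// MemTable, assigning consecutive sequence numbers starting at sequence_.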
class MemTableInserter : public WriteBatch::Handler {
public:
SequenceNumber sequence_;
MemTable* mem_;
void Put(const Slice& key, const Slice& value) override {
mem_->Add(sequence_, kTypeValue, key, value);
sequence_++;
}
void Delete(const Slice& key) override {
mem_->Add(sequence_, kTypeDeletion, key, Slice());
sequence_++;
}
};
}
Status WriteBatchInternal::InsertInto(const WriteBatch* b, MemTable* memtable) {
MemTableInserter inserter;
inserter.sequence_ = WriteBatchInternal::Sequence(b);
inserter.mem_ = memtable;
return b->Iterate(&inserter);
}
void WriteBatchInternal::SetContents(WriteBatch* b, const Slice& contents) {
assert(contents.size() >= kHeader);
b->rep_.assign(contents.data(), contents.size());
}
void WriteBatchInternal::Append(WriteBatch* dst, const WriteBatch* src) {
SetCount(dst, Count(dst) + Count(src));
assert(src->rep_.size() >= kHeader);
dst->rep_.append(src->rep_.data() + kHeader, src->rep_.size() - kHeader);
}
} | #include "gtest/gtest.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "util/logging.h"
namespace leveldb {
static std::string PrintContents(WriteBatch* b) {
InternalKeyComparator cmp(BytewiseComparator());
MemTable* mem = new MemTable(cmp);
mem->Ref();
std::string state;
Status s = WriteBatchInternal::InsertInto(b, mem);
int count = 0;
Iterator* iter = mem->NewIterator();
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ParsedInternalKey ikey;
EXPECT_TRUE(ParseInternalKey(iter->key(), &ikey));
switch (ikey.type) {
case kTypeValue:
state.append("Put(");
state.append(ikey.user_key.ToString());
state.append(", ");
state.append(iter->value().ToString());
state.append(")");
count++;
break;
case kTypeDeletion:
state.append("Delete(");
state.append(ikey.user_key.ToString());
state.append(")");
count++;
break;
}
state.append("@");
state.append(NumberToString(ikey.sequence));
}
delete iter;
if (!s.ok()) {
state.append("ParseError()");
} else if (count != WriteBatchInternal::Count(b)) {
state.append("CountMismatch()");
}
mem->Unref();
return state;
}
TEST(WriteBatchTest, Empty) {
WriteBatch batch;
ASSERT_EQ("", PrintContents(&batch));
ASSERT_EQ(0, WriteBatchInternal::Count(&batch));
}
TEST(WriteBatchTest, Multiple) {
WriteBatch batch;
batch.Put(Slice("foo"), Slice("bar"));
batch.Delete(Slice("box"));
batch.Put(Slice("baz"), Slice("boo"));
WriteBatchInternal::SetSequence(&batch, 100);
ASSERT_EQ(100, WriteBatchInternal::Sequence(&batch));
ASSERT_EQ(3, WriteBatchInternal::Count(&batch));
ASSERT_EQ(
"Put(baz, boo)@102"
"Delete(box)@101"
"Put(foo, bar)@100",
PrintContents(&batch));
}
TEST(WriteBatchTest, Corruption) {
WriteBatch batch;
batch.Put(Slice("foo"), Slice("bar"));
batch.Delete(Slice("box"));
WriteBatchInternal::SetSequence(&batch, 200);
Slice contents = WriteBatchInternal::Contents(&batch);
WriteBatchInternal::SetContents(&batch,
Slice(contents.data(), contents.size() - 1));
ASSERT_EQ(
"Put(foo, bar)@200"
"ParseError()",
PrintContents(&batch));
}
TEST(WriteBatchTest, Append) {
WriteBatch b1, b2;
WriteBatchInternal::SetSequence(&b1, 200);
WriteBatchInternal::SetSequence(&b2, 300);
b1.Append(b2);
ASSERT_EQ("", PrintContents(&b1));
b2.Put("a", "va");
b1.Append(b2);
ASSERT_EQ("Put(a, va)@200", PrintContents(&b1));
b2.Clear();
b2.Put("b", "vb");
b1.Append(b2);
ASSERT_EQ(
"Put(a, va)@200"
"Put(b, vb)@201",
PrintContents(&b1));
b2.Delete("foo");
b1.Append(b2);
ASSERT_EQ(
"Put(a, va)@200"
"Put(b, vb)@202"
"Put(b, vb)@201"
"Delete(foo)@203",
PrintContents(&b1));
}
TEST(WriteBatchTest, ApproximateSize) {
WriteBatch batch;
size_t empty_size = batch.ApproximateSize();
batch.Put(Slice("foo"), Slice("bar"));
size_t one_key_size = batch.ApproximateSize();
ASSERT_LT(empty_size, one_key_size);
batch.Put(Slice("baz"), Slice("boo"));
size_t two_keys_size = batch.ApproximateSize();
ASSERT_LT(one_key_size, two_keys_size);
batch.Delete(Slice("box"));
size_t post_delete_size = batch.ApproximateSize();
ASSERT_LT(two_keys_size, post_delete_size);
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/db/write_batch.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/db/write_batch_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
802b3b11-0f4c-4890-b4d4-7fec4156a47f | cpp | google/arolla | qtype | arolla/jagged_shape/dense_array/qtype/qtype.cc | arolla/jagged_shape/dense_array/qtype/qtype_test.cc | #include "arolla/jagged_shape/dense_array/qtype/qtype.h"
#include "absl/base/no_destructor.h"
#include "arolla/dense_array/edge.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/jagged_shape/dense_array/jagged_shape.h"
#include "arolla/jagged_shape/qtype/qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/meta.h"
namespace arolla {
namespace {
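// Singleton QType for JaggedDenseArrayShape; its edges are DenseArrayEdge
// values.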
class JaggedDenseArrayShapeQType final : public JaggedShapeQType {
public:
static const JaggedDenseArrayShapeQType* GetInstance() {
static absl::NoDestructor<JaggedDenseArrayShapeQType> result;
return result.get();
}
JaggedDenseArrayShapeQType()
: JaggedShapeQType(meta::type<JaggedDenseArrayShape>(),
"JAGGED_DENSE_ARRAY_SHAPE") {}
QTypePtr edge_qtype() const override { return GetQType<DenseArrayEdge>(); };
};
}
QTypePtr QTypeTraits<JaggedDenseArrayShape>::type() {
return JaggedDenseArrayShapeQType::GetInstance();
}
AROLLA_INITIALIZER(
.reverse_deps = {arolla::initializer_dep::kQTypes}, .init_fn = [] {
return SetEdgeQTypeToJaggedShapeQType(
GetQType<DenseArrayEdge>(), GetQType<JaggedDenseArrayShape>());
})
} | #include "arolla/jagged_shape/dense_array/qtype/qtype.h"
#include <cstdint>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/edge.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/jagged_shape/array/jagged_shape.h"
#include "arolla/jagged_shape/array/qtype/qtype.h"
#include "arolla/jagged_shape/dense_array/jagged_shape.h"
#include "arolla/jagged_shape/qtype/qtype.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/testing/repr_token_eq.h"
namespace arolla {
namespace {
using ::absl_testing::StatusIs;
using ::arolla::testing::ReprTokenEq;
TEST(QTypeTest, TypedValueRepr) {
ASSERT_OK_AND_ASSIGN(auto edge, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 2})));
ASSERT_OK_AND_ASSIGN(auto shape, JaggedDenseArrayShape::FromEdges({edge}));
auto tv = TypedValue::FromValue(shape);
EXPECT_THAT(tv.GenReprToken(), ReprTokenEq("JaggedShape(2)"));
}
TEST(QTypeTest, JaggedDenseArrayShapeQType) {
QTypePtr type = GetQType<JaggedDenseArrayShape>();
EXPECT_NE(type, nullptr);
EXPECT_EQ(type->name(), "JAGGED_DENSE_ARRAY_SHAPE");
EXPECT_EQ(type->type_info(), typeid(JaggedDenseArrayShape));
EXPECT_EQ(type->value_qtype(), nullptr);
EXPECT_TRUE(IsJaggedShapeQType(type));
EXPECT_EQ(type, GetQType<JaggedDenseArrayShape>());
EXPECT_NE(type, GetQType<JaggedArrayShape>());
}
TEST(QTypeTest, JaggedDenseArrayShapeFingerprint) {
ASSERT_OK_AND_ASSIGN(auto edge1, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 2})));
ASSERT_OK_AND_ASSIGN(auto edge2, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 1, 3})));
ASSERT_OK_AND_ASSIGN(auto shape1,
JaggedDenseArrayShape::FromEdges({edge1, edge2}));
ASSERT_OK_AND_ASSIGN(auto shape2,
JaggedDenseArrayShape::FromEdges({edge1, edge2}));
auto tv1 = TypedValue::FromValue(shape1);
auto tv2 = TypedValue::FromValue(shape2);
EXPECT_EQ(tv1.GetFingerprint(), tv2.GetFingerprint());
ASSERT_OK_AND_ASSIGN(auto edge3, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 1, 4})));
ASSERT_OK_AND_ASSIGN(auto shape3,
JaggedDenseArrayShape::FromEdges({edge1, edge3}));
auto tv3 = TypedValue::FromValue(shape3);
EXPECT_NE(tv1.GetFingerprint(), tv3.GetFingerprint());
}
TEST(QTypeTest, CopyTo) {
ASSERT_OK_AND_ASSIGN(auto edge1, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 2})));
ASSERT_OK_AND_ASSIGN(auto edge2, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 1, 3})));
ASSERT_OK_AND_ASSIGN(auto shape,
JaggedDenseArrayShape::FromEdges({edge1, edge2}));
auto tv = TypedValue::FromValue(shape);
auto tv_copy = TypedValue(tv.AsRef());
EXPECT_EQ(tv.GetFingerprint(), tv_copy.GetFingerprint());
}
TEST(QTypeTest, JaggedShapeQTypeFromEdgeQType) {
{
ASSERT_OK_AND_ASSIGN(auto shape_qtype, GetJaggedShapeQTypeFromEdgeQType(
GetQType<DenseArrayEdge>()));
EXPECT_EQ(shape_qtype, GetQType<JaggedDenseArrayShape>());
}
{
EXPECT_THAT(
GetJaggedShapeQTypeFromEdgeQType(GetQType<DenseArrayGroupScalarEdge>()),
StatusIs(absl::StatusCode::kInvalidArgument,
"DENSE_ARRAY_TO_SCALAR_EDGE key is not registered"));
}
{
EXPECT_THAT(
SetEdgeQTypeToJaggedShapeQType(GetQType<DenseArrayEdge>(),
GetQType<DenseArrayGroupScalarEdge>()),
StatusIs(absl::StatusCode::kInvalidArgument,
"DENSE_ARRAY_EDGE key is already registered"));
}
}
TEST(QTypeTest, EdgeQType) {
auto type = GetQType<JaggedDenseArrayShape>();
auto shape_qtype = dynamic_cast<const JaggedShapeQType*>(type);
EXPECT_EQ(shape_qtype->edge_qtype(), GetQType<DenseArrayEdge>());
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/jagged_shape/dense_array/qtype/qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/jagged_shape/dense_array/qtype/qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
82d02e5a-7dfd-45a3-8fb6-a86cc367b6a7 | cpp | google/tensorstore | contiguous_layout | tensorstore/contiguous_layout.cc | tensorstore/contiguous_layout_test.cc | #include "tensorstore/contiguous_layout.h"
#include <stddef.h>
#include <cassert>
#include <ostream>
#include "tensorstore/index.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
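// Computes strides for a contiguous layout with the given element stride: the
// last dimension varies fastest for C (row-major) order and the first
// dimension varies fastest for Fortran (column-major) order.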
void ComputeStrides(ContiguousLayoutOrder order, ptrdiff_t element_stride,
tensorstore::span<const Index> shape,
tensorstore::span<Index> strides) {
const DimensionIndex rank = shape.size();
assert(strides.size() == rank);
if (order == ContiguousLayoutOrder::right) {
for (DimensionIndex i = rank - 1; i >= 0; --i) {
strides[i] = element_stride;
element_stride *= shape[i];
}
} else {
for (DimensionIndex i = 0; i < rank; ++i) {
strides[i] = element_stride;
element_stride *= shape[i];
}
}
}
std::ostream& operator<<(std::ostream& os, ContiguousLayoutOrder order) {
return os << (order == ContiguousLayoutOrder::c ? 'C' : 'F');
}
} | #include "tensorstore/contiguous_layout.h"
#include <array>
#include <sstream>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::ComputeStrides;
using ::tensorstore::ContiguousLayoutOrder;
using ::tensorstore::GetContiguousIndices;
using ::tensorstore::GetContiguousOffset;
using ::tensorstore::Index;
TEST(ContiguousLayoutOrderTest, PrintToOstream) {
{
std::ostringstream ostr;
ostr << ContiguousLayoutOrder::c;
EXPECT_EQ("C", ostr.str());
}
{
std::ostringstream ostr;
ostr << ContiguousLayoutOrder::fortran;
EXPECT_EQ("F", ostr.str());
}
}
TEST(ComputeStridesTest, COrder) {
{
std::array<Index, 3> strides;
ComputeStrides(ContiguousLayoutOrder::c, 1,
tensorstore::span<const Index>({3l, 4l, 5l}), strides);
EXPECT_THAT(strides, ::testing::ElementsAre(20, 5, 1));
}
{
std::array<Index, 3> strides;
ComputeStrides(ContiguousLayoutOrder::c, 2,
tensorstore::span<const Index>({3l, 4l, 5l}), strides);
EXPECT_THAT(strides, ::testing::ElementsAre(40, 10, 2));
}
}
TEST(ComputeStridesTest, FOrder) {
std::array<Index, 3> strides;
ComputeStrides(ContiguousLayoutOrder::fortran, 1,
tensorstore::span<const Index>({3l, 4l, 5l}), strides);
EXPECT_THAT(strides, ::testing::ElementsAre(1, 3, 12));
}
TEST(GetContiguousOffsetTest, Basic) {
Index indices[2];
EXPECT_EQ(3 * 11 + 4,
GetContiguousOffset<ContiguousLayoutOrder::c>({{7, 11}}, {{3, 4}}));
GetContiguousIndices<ContiguousLayoutOrder::c, Index>(3 * 11 + 4, {{7, 11}},
indices);
EXPECT_THAT(indices, ::testing::ElementsAre(3, 4));
EXPECT_EQ(3 + 4 * 7, GetContiguousOffset<ContiguousLayoutOrder::fortran>(
{{7, 11}}, {{3, 4}}));
GetContiguousIndices<ContiguousLayoutOrder::fortran, Index>(
3 + 4 * 7, {{7, 11}}, indices);
EXPECT_THAT(indices, ::testing::ElementsAre(3, 4));
EXPECT_EQ(
2 * (7 * 11) + 3 * 11 + 4,
GetContiguousOffset<ContiguousLayoutOrder::c>({{5, 7, 11}}, {{2, 3, 4}}));
EXPECT_EQ(2 + 5 * 3 + (5 * 7) * 4,
GetContiguousOffset<ContiguousLayoutOrder::fortran>({{5, 7, 11}},
{{2, 3, 4}}));
EXPECT_EQ(0, GetContiguousOffset<ContiguousLayoutOrder::c>({}, {}));
EXPECT_EQ(0, GetContiguousOffset<ContiguousLayoutOrder::fortran>({}, {}));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/contiguous_layout.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/contiguous_layout_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
be356233-8fba-4b09-a422-fc77a6275b18 | cpp | tensorflow/tensorflow | proto_serialization | third_party/xla/xla/tsl/lib/strings/proto_serialization.cc | tensorflow/core/lib/strings/proto_serialization_test.cc | #include "xla/tsl/lib/strings/proto_serialization.h"
#include <cstring>
#include <memory>
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/lib/gtl/inlined_vector.h"
#include "tsl/platform/hash.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
namespace tsl {
namespace {
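// Serializes a message deterministically into an inline 256-byte buffer when
// it fits, falling back to a heap allocation for larger messages.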
class DeterministicSerializer {
public:
explicit DeterministicSerializer(const protobuf::MessageLite& msg)
: DeterministicSerializer(msg, msg.ByteSizeLong()) {}
DeterministicSerializer(const protobuf::MessageLite& msg, size_t size)
: size_(size) {
char* ptr = space_;
if (size_ > sizeof(space_)) {
ptr = new char[size_];
alloc_.reset(ptr);
}
bool ok = SerializeToBufferDeterministic(msg, ptr, size_);
DCHECK(ok);
}
size_t size() const { return size_; }
const char* data() const { return alloc_ == nullptr ? space_ : alloc_.get(); }
private:
static constexpr int kInlinedBufferSize = 256;
const size_t size_;
std::unique_ptr<char[]> alloc_;
char space_[kInlinedBufferSize];
};
}
bool SerializeToStringDeterministic(const protobuf::MessageLite& msg,
string* result) {
const size_t size = msg.ByteSizeLong();
DCHECK_LE(size, static_cast<size_t>(INT_MAX));
*result = string(size, '\0');
return SerializeToBufferDeterministic(msg, const_cast<char*>(result->data()),
result->size());
}
bool SerializeToBufferDeterministic(const protobuf::MessageLite& msg,
char* buffer, size_t size) {
DCHECK(msg.ByteSizeLong() == size && size <= static_cast<size_t>(INT_MAX));
protobuf::io::ArrayOutputStream array_stream(buffer, size);
protobuf::io::CodedOutputStream output_stream(&array_stream);
output_stream.SetSerializationDeterministic(true);
msg.SerializeWithCachedSizes(&output_stream);
return !output_stream.HadError() &&
size == static_cast<size_t>(output_stream.ByteCount());
}
bool AreSerializedProtosEqual(const protobuf::MessageLite& x,
const protobuf::MessageLite& y) {
const size_t size = x.ByteSizeLong();
if (size != y.ByteSizeLong()) return false;
if (size == 0) return true;
DeterministicSerializer x_serialized(x, size);
DeterministicSerializer y_serialized(y, size);
return memcmp(x_serialized.data(), y_serialized.data(), size) == 0;
}
uint64 DeterministicProtoHash64(const protobuf::MessageLite& proto,
uint64 seed) {
DeterministicSerializer serialized(proto);
return Hash64(serialized.data(), serialized.size(), seed);
}
uint64 DeterministicProtoHash64(const protobuf::MessageLite& proto) {
DeterministicSerializer serialized(proto);
return Hash64(serialized.data(), serialized.size());
}
} | #include "tensorflow/core/lib/strings/proto_serialization.h"
#include <string>
#include "absl/memory/memory.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
GraphDef MakeGraphDef(int num_nodes) {
GraphDef graph_def;
for (int i = 0; i < num_nodes; ++i) {
NodeDef* node = graph_def.add_node();
node->set_name(strings::StrCat("node", i));
node->set_op(strings::StrCat("op", i % 10));
(*node->mutable_attr())["foo"].set_f(3.14f);
(*node->mutable_attr())["bar"].set_s("baz");
}
return graph_def;
}
}
static void BM_ProtoSerializationToString(::testing::benchmark::State& state) {
int num_nodes = state.range(0);
GraphDef graph_def = MakeGraphDef(num_nodes);
for (auto i : state) {
string serialized;
testing::DoNotOptimize(
SerializeToStringDeterministic(graph_def, &serialized));
}
}
BENCHMARK(BM_ProtoSerializationToString)->Range(1, 10000);
static void BM_ProtoSerializationToBuffer(::testing::benchmark::State& state) {
int num_nodes = state.range(0);
GraphDef graph_def = MakeGraphDef(num_nodes);
const size_t size = graph_def.ByteSizeLong();
for (auto i : state) {
gtl::InlinedVector<char, 1024> buf(size);
testing::DoNotOptimize(
SerializeToBufferDeterministic(graph_def, buf.data(), size));
}
}
BENCHMARK(BM_ProtoSerializationToBuffer)->Range(1, 10000);
static void BM_DeterministicProtoHash64(::testing::benchmark::State& state) {
int num_nodes = state.range(0);
GraphDef graph_def = MakeGraphDef(num_nodes);
for (auto i : state) {
testing::DoNotOptimize(DeterministicProtoHash64(graph_def));
}
}
BENCHMARK(BM_DeterministicProtoHash64)->Range(1, 10000);
static void BM_AreSerializedProtosEqual(::testing::benchmark::State& state) {
int num_nodes = state.range(0);
GraphDef graph_def_a = MakeGraphDef(num_nodes);
GraphDef graph_def_b = MakeGraphDef(num_nodes);
graph_def_b.mutable_node(0)->mutable_name()[0] = 'l';
for (auto i : state) {
testing::DoNotOptimize(AreSerializedProtosEqual(graph_def_a, graph_def_a));
}
}
BENCHMARK(BM_AreSerializedProtosEqual)->Range(1, 10000);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/strings/proto_serialization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/strings/proto_serialization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
22592641-0513-4ddb-bc56-908b24bd89c8 | cpp | google/tensorstore | gce_auth_provider | tensorstore/internal/oauth2/gce_auth_provider.cc | tensorstore/internal/oauth2/gce_auth_provider_test.cc | #include "tensorstore/internal/oauth2/gce_auth_provider.h"
#include <functional>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <string_view>
#include <utility>
#include "absl/flags/flag.h"
#include "absl/status/status.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/oauth2/auth_provider.h"
#include "tensorstore/internal/oauth2/bearer_token.h"
#include "tensorstore/internal/oauth2/oauth_utils.h"
#include "tensorstore/internal/oauth2/refreshable_auth_provider.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
ABSL_FLAG(std::optional<std::string>, tensorstore_gce_metadata_root,
std::nullopt,
"Url to used for http access metadata.google.internal. "
"Overrides GCE_METADATA_ROOT.");
namespace tensorstore {
namespace internal_oauth2 {
namespace {
namespace jb = tensorstore::internal_json_binding;
using ::tensorstore::internal::GetFlagOrEnvValue;
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::tensorstore::internal_http::HttpResponse;
constexpr static auto ServiceAccountInfoBinder = jb::Object(
jb::Member("email",
jb::Projection(&GceAuthProvider::ServiceAccountInfo::email,
jb::NonEmptyStringBinder)),
jb::Member("scopes",
jb::Projection(&GceAuthProvider::ServiceAccountInfo::scopes)),
jb::DiscardExtraMembers);
}
std::string GceMetadataHostname() {
return GetFlagOrEnvValue(FLAGS_tensorstore_gce_metadata_root,
"GCE_METADATA_ROOT")
.value_or("metadata.google.internal");
}
GceAuthProvider::GceAuthProvider(
std::shared_ptr<internal_http::HttpTransport> transport,
const ServiceAccountInfo& service_account_info,
std::function<absl::Time()> clock)
: RefreshableAuthProvider(std::move(clock)),
service_account_email_(service_account_info.email),
scopes_(service_account_info.scopes.begin(),
service_account_info.scopes.end()),
transport_(std::move(transport)) {}
Result<HttpResponse> GceAuthProvider::IssueRequest(std::string path,
bool recursive) {
HttpRequestBuilder request_builder(
"GET", internal::JoinPath("http:
request_builder.AddHeader("Metadata-Flavor: Google");
if (recursive) {
request_builder.AddQueryParameter("recursive", "true");
}
return transport_->IssueRequest(request_builder.BuildRequest(), {}).result();
}
Result<GceAuthProvider::ServiceAccountInfo>
GceAuthProvider::GetDefaultServiceAccountInfoIfRunningOnGce(
internal_http::HttpTransport* transport) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto response,
transport
->IssueRequest(
HttpRequestBuilder(
"GET",
internal::JoinPath(
"http:
"/computeMetadata/v1/instance/service-accounts/default/"))
.AddHeader("Metadata-Flavor: Google")
.AddQueryParameter("recursive", "true")
.BuildRequest(),
{})
.result());
TENSORSTORE_RETURN_IF_ERROR(HttpResponseCodeToStatus(response));
auto info_response = internal::ParseJson(response.payload.Flatten());
if (info_response.is_discarded()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Failed to parse service account response: ",
response.payload.Flatten()));
}
return jb::FromJson<ServiceAccountInfo>(info_response,
ServiceAccountInfoBinder);
}
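// Fetches a fresh OAuth2 access token for the service account from the GCE
// metadata server and converts the `expires_in` field into an absolute
// expiration time.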
Result<BearerTokenWithExpiration> GceAuthProvider::Refresh() {
const auto now = GetCurrentTime();
TENSORSTORE_ASSIGN_OR_RETURN(
auto response,
IssueRequest(
tensorstore::StrCat("/computeMetadata/v1/instance/service-accounts/",
service_account_email_, "/token"),
false));
TENSORSTORE_RETURN_IF_ERROR(HttpResponseCodeToStatus(response));
TENSORSTORE_ASSIGN_OR_RETURN(auto result, internal_oauth2::ParseOAuthResponse(
response.payload.Flatten()));
return BearerTokenWithExpiration{std::move(result.access_token),
now + absl::Seconds(result.expires_in)};
}
}
} | #include "tensorstore/internal/oauth2/gce_auth_provider.h"
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/time/clock.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::Result;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_oauth2::GceAuthProvider;
const char kOAuthResponse[] = R"(
{
"token_type" : "refresh",
"access_token": "abc",
"expires_in": 456
}
)";
class TestAuthProvider : public GceAuthProvider {
public:
TestAuthProvider()
: GceAuthProvider(nullptr, {"[email protected]", {"abc", "xyz"}},
[this] { return this->time; }),
time(absl::Now()),
idx(0) {}
virtual Result<HttpResponse> IssueRequest(std::string path, bool recursive) {
request.emplace_back(std::move(path));
if (responses.count(idx) != 0) {
return responses[idx++];
}
return HttpResponse{};
}
absl::Time time;
int idx;
absl::flat_hash_map<int, HttpResponse> responses;
std::vector<std::string> request;
};
TEST(GceAuthProviderTest, InitialState) {
TestAuthProvider auth;
EXPECT_FALSE(auth.IsValid());
EXPECT_TRUE(auth.IsExpired());
}
TEST(GceAuthProviderTest, Status200) {
TestAuthProvider auth;
auth.responses = {
{0, {200, absl::Cord(kOAuthResponse), {}}},
{1, {200, absl::Cord(kOAuthResponse), {}}},
};
EXPECT_FALSE(auth.IsValid());
{
auto result = auth.GetToken();
EXPECT_EQ(1, auth.idx);
EXPECT_TRUE(result.ok()) << result.status();
EXPECT_EQ(auth.time + absl::Seconds(456), result->expiration);
EXPECT_EQ("abc", result->token);
}
EXPECT_FALSE(auth.IsExpired());
EXPECT_TRUE(auth.IsValid());
auth.time += absl::Seconds(600);
{
auto result = auth.GetToken();
EXPECT_EQ(2, auth.idx);
EXPECT_TRUE(result.ok()) << result.status();
EXPECT_EQ(auth.time + absl::Seconds(456), result->expiration);
EXPECT_EQ("abc", result->token);
}
}
TEST(GceAuthProviderTest, NoResponse) {
TestAuthProvider auth;
auto result = auth.GetToken();
EXPECT_FALSE(result.ok()) << result.status();
ASSERT_EQ(1, auth.request.size());
EXPECT_EQ(
"/computeMetadata/v1/instance/service-accounts/[email protected]/token",
auth.request[0]);
}
TEST(GceAuthProviderTest, Status400) {
TestAuthProvider auth;
auth.responses = {
{0, {400, absl::Cord(kOAuthResponse), {}}},
};
auto result = auth.GetToken();
EXPECT_EQ(1, auth.idx);
EXPECT_FALSE(result.ok()) << result.status();
}
TEST(GceAuthProviderTest, Hostname) {
EXPECT_EQ("metadata.google.internal",
tensorstore::internal_oauth2::GceMetadataHostname());
tensorstore::internal::SetEnv("GCE_METADATA_ROOT", "localhost");
EXPECT_EQ("localhost", tensorstore::internal_oauth2::GceMetadataHostname());
tensorstore::internal::UnsetEnv("GCE_METADATA_ROOT");
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/oauth2/gce_auth_provider.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/oauth2/gce_auth_provider_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
b2d66996-ec37-441c-9dba-74eedda21742 | cpp | tensorflow/tensorflow | all_reduce_simplifier | third_party/xla/xla/service/all_reduce_simplifier.cc | third_party/xla/xla/service/all_reduce_simplifier_test.cc | #include "xla/service/all_reduce_simplifier.h"
#include <cstdint>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_replication_analysis.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
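// Rewrites collectives that are provably no-ops or locally computable:
// all-gather/reduce-scatter ops whose output shape matches the operand are
// dropped, and all-reduces over replicated operands (or single-participant
// groups) are replaced by the operand, scaled by the group size for additive
// reductions.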
absl::StatusOr<bool> AllReduceSimplifier::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
TF_ASSIGN_OR_RETURN(
auto replication,
      HloReplicationAnalysis::Run(module, /*cross_partition_spmd=*/false));
std::vector<std::pair<HloInstruction*, int64_t>> all_reduces_to_replace;
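  // Returns the participant count shared by every replica group of the
  // all-reduce, or -1 when the groups are empty or have differing sizes.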
auto get_participant_counts_for_replica_group =
[](const HloInstruction* all_reduce) -> absl::StatusOr<int64_t> {
const HloModuleConfig& config = all_reduce->GetModule()->config();
TF_ASSIGN_OR_RETURN(
CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(all_reduce->channel_id().has_value(),
Cast<HloAllReduceInstruction>(all_reduce)
->use_global_device_ids()));
int64_t num_devices = config.num_partitions();
int64_t num_replicas = config.replica_count();
TF_ASSIGN_OR_RETURN(std::vector<int64_t> participant_counts,
GetPariticipantCountsForReplicaGroups(
num_replicas, num_devices,
all_reduce->replica_groups(), group_mode));
if (participant_counts.empty()) {
return -1;
}
if (!absl::c_all_of(participant_counts, [&](int64_t participant_count) {
return participant_count == participant_counts[0];
})) {
return -1;
}
return participant_counts[0];
};
bool changed = false;
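  // First pass: remove all-gather / reduce-scatter ops whose output shape is
  // compatible with the operand shape; such ops cannot change the data.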
for (auto computation : module->computations(execution_threads)) {
for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
if ((inst->opcode() == HloOpcode::kAllGather ||
inst->opcode() == HloOpcode::kReduceScatter) &&
ShapeUtil::Compatible(inst->shape(), inst->operand(0)->shape())) {
changed = true;
TF_RETURN_IF_ERROR(
computation->ReplaceInstruction(inst, inst->mutable_operand(0)));
}
}
}
for (auto computation : module->computations(execution_threads)) {
for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
if (!inst->shape().IsArray()) {
continue;
}
if (!inst->IsCrossReplicaAllReduce() && !inst->IsCrossModuleAllReduce()) {
continue;
}
TF_ASSIGN_OR_RETURN(int64_t group_size,
get_participant_counts_for_replica_group(inst));
if (group_size == -1 ||
(!inst->IsCrossReplicaAllReduce() && group_size != 1) ||
(!inst->IsCrossReplicaAllReduce() &&
!module->config().use_spmd_partitioning())) {
continue;
}
if (replication->HloInstructionIsReplicatedAt(inst->operand(0), {}) ||
group_size == 1) {
all_reduces_to_replace.push_back({inst, group_size});
}
}
}
for (auto all_reduce_and_group_size : all_reduces_to_replace) {
auto all_reduce = all_reduce_and_group_size.first;
const int64_t replica_group_size = all_reduce_and_group_size.second;
if (replica_group_size == 1) {
TF_RETURN_IF_ERROR(all_reduce->parent()->ReplaceInstruction(
all_reduce, all_reduce->mutable_operand(0)));
changed = true;
continue;
}
if (all_reduce->to_apply()->instruction_count() != 3 ||
all_reduce->to_apply()->num_parameters() != 2) {
continue;
}
HloInstruction* replacement;
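    // A sum over a replicated operand equals the operand times the group
    // size; min/max/and/or over identical values is the operand itself.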
switch (all_reduce->to_apply()->root_instruction()->opcode()) {
case HloOpcode::kAdd: {
auto multiplier =
all_reduce->parent()->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(replica_group_size)));
if (all_reduce->shape().element_type() != S32) {
multiplier = all_reduce->parent()->AddInstruction(
HloInstruction::CreateConvert(
ShapeUtil::ChangeElementType(
multiplier->shape(), all_reduce->shape().element_type()),
multiplier));
}
if (all_reduce->shape().rank() > 0) {
multiplier = all_reduce->parent()->AddInstruction(
HloInstruction::CreateBroadcast(all_reduce->shape(), multiplier,
{}));
}
replacement =
all_reduce->parent()->AddInstruction(HloInstruction::CreateBinary(
all_reduce->shape(), HloOpcode::kMultiply,
all_reduce->mutable_operand(0), multiplier));
break;
}
case HloOpcode::kMinimum:
case HloOpcode::kMaximum:
case HloOpcode::kOr:
case HloOpcode::kAnd:
replacement = all_reduce->mutable_operand(0);
break;
default:
continue;
}
VLOG(2) << "Replacing " << all_reduce->ToString() << " with "
<< replacement->ToString();
TF_RETURN_IF_ERROR(all_reduce->ReplaceAllUsesWith(replacement));
changed = true;
}
return changed;
}
} | #include "xla/service/all_reduce_simplifier.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = match;
using AllReduceSimplifierTest = HloTestBase;
TEST_F(AllReduceSimplifierTest, ReplicatedParameters) {
const char* kModuleStr = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
max {
a.1 = f32[] parameter(0)
b.1 = f32[] parameter(1)
ROOT max = f32[] maximum(a.1, b.1)
}
min {
a.2 = f32[] parameter(0)
b.2 = f32[] parameter(1)
ROOT min = f32[] minimum(a.2, b.2)
}
sum.1 {
a.3 = f32[] parameter(0)
b.3 = f32[] parameter(1)
ROOT add.1 = f32[] add(a.3, b.3)
}
test {
p0 = f32[8,16] parameter(0), parameter_replication={true}
p1 = f32[8,16] parameter(1), parameter_replication={false}
p2 = f32[] parameter(2), parameter_replication={true}
all-reduce = f32[8,16] all-reduce(p0), replica_groups={}, to_apply=sum
all-reduce.1 = f32[8,16] all-reduce(p0), replica_groups={}, to_apply=max
all-reduce.2 = f32[8,16] all-reduce(p1), replica_groups={}, to_apply=min
all-reduce.3 = f32[] all-reduce(p2), replica_groups={}, to_apply=sum.1
ROOT tuple = (f32[8,16], f32[8,16], f32[8,16], f32[]) tuple(all-reduce, all-reduce.1, all-reduce.2, all-reduce.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
                                           kModuleStr, /*replica_count=*/8));
AllReduceSimplifier simplifier(8);
ASSERT_TRUE(simplifier.Run(module.get()).value());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::MultiplyAnyOrder(m::Parameter(0),
m::Broadcast(m::Convert(m::ConstantScalar(8)))),
m::Parameter(0), m::AllReduce(m::Parameter(1)),
m::MultiplyAnyOrder(m::Parameter(2),
m::Convert(m::ConstantScalar(8))))));
}
TEST_F(AllReduceSimplifierTest, AllReduceAfterAllReduce) {
const char* kModuleStr = R"(
HloModule m
max {
a.1 = f32[] parameter(0)
b.1 = f32[] parameter(1)
ROOT max = f32[] maximum(a.1, b.1)
}
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
test {
p0 = f32[8,16] parameter(0), parameter_replication={false}
all-reduce = f32[8,16] all-reduce(p0), replica_groups={}, to_apply=max
ROOT all-reduce.1 = f32[8,16] all-reduce(all-reduce), replica_groups={}, to_apply=sum
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
                                           kModuleStr, /*replica_count=*/8));
AllReduceSimplifier simplifier(8);
ASSERT_TRUE(simplifier.Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::MultiplyAnyOrder(
m::AllReduce(m::Parameter(0)),
m::Broadcast(m::Convert(m::ConstantScalar(8))))));
}
TEST_F(AllReduceSimplifierTest, SubgroupAllReduce) {
const char* kModuleStr = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
max {
a.1 = f32[] parameter(0)
b.1 = f32[] parameter(1)
ROOT max = f32[] maximum(a.1, b.1)
}
min {
a.2 = f32[] parameter(0)
b.2 = f32[] parameter(1)
ROOT min = f32[] minimum(a.2, b.2)
}
test {
p0 = f32[8,16] parameter(0), parameter_replication={true}
p1 = f32[8,16] parameter(1), parameter_replication={false}
all-reduce = f32[8,16] all-reduce(p0), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum
all-reduce.1 = f32[8,16] all-reduce(p0), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=max
all-reduce.2 = f32[8,16] all-reduce(p1), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=min
ROOT tuple = (f32[8,16], f32[8,16], f32[8,16]) tuple(all-reduce, all-reduce.1, all-reduce.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
                                           kModuleStr, /*replica_count=*/8));
AllReduceSimplifier simplifier(8);
ASSERT_TRUE(simplifier.Run(module.get()).value());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::MultiplyAnyOrder(m::Parameter(0),
m::Broadcast(m::Convert(m::ConstantScalar(4)))),
m::Parameter(0), m::AllReduce(m::Parameter(1)))));
}
TEST_F(AllReduceSimplifierTest, TrivialSubgroupAllReduce) {
const char* kModuleStr = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
test {
p0 = f32[8,16] parameter(0), parameter_replication={false}
ROOT all-reduce = f32[8,16] all-reduce(p0),
replica_groups={{0},{1},{2},{3},{4},{5},{6},{7}},
to_apply=sum
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
                                           kModuleStr, /*replica_count=*/8));
AllReduceSimplifier simplifier(8);
EXPECT_TRUE(simplifier.Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Parameter(0)));
}
TEST_F(AllReduceSimplifierTest, TrivialSubgroupNonCrossReplicaAllReduce) {
const char* kModuleStr = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
test {
p0 = f32[8,16] parameter(0), parameter_replication={false}
ROOT all-reduce = f32[8,16] all-reduce(p0),
channel_id=1,
use_global_device_ids=true,
replica_groups={{0},{1},{2},{3},{4},{5},{6},{7}},
to_apply=sum
}
)";
TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(kModuleStr, /*replica_count=*/1,
                                                /*num_partitions=*/8));
module->mutable_config().set_use_spmd_partitioning(true);
AllReduceSimplifier simplifier(1);
EXPECT_TRUE(simplifier.Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Parameter(0)));
}
TEST_F(AllReduceSimplifierTest, NonCrossReplicaAllReduceAfterAllReduce) {
const char* kModuleStr = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
test {
p0 = f32[8,16] parameter(0), parameter_replication={false}
all-reduce = f32[8,16] all-reduce(p0),
channel_id=1,
use_global_device_ids=true,
replica_groups={{0,2},{1,3},{4,6},{5,7}},
to_apply=sum
ROOT all-reduce.1 = f32[8,16] all-reduce(all-reduce),
channel_id=2,
use_global_device_ids=true,
replica_groups={{0,4},{1,5},{2,6},{3,7}},
to_apply=sum
}
)";
TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(kModuleStr, /*replica_count=*/1,
                                                /*num_partitions=*/8));
module->mutable_config().set_use_spmd_partitioning(true);
AllReduceSimplifier simplifier(1);
EXPECT_FALSE(simplifier.Run(module.get()).value());
}
TEST_F(AllReduceSimplifierTest, MPMDNonCrossReplicaAllReduce) {
const char* kModuleStr = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
test {
p0 = f32[8,16] parameter(0), parameter_replication={false}
ROOT all-reduce = f32[8,16] all-reduce(p0),
channel_id=1,
replica_groups={{0},{1}},
to_apply=sum
}
)";
TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(kModuleStr, /*replica_count=*/2,
                                                /*num_partitions=*/1));
module->mutable_config().set_use_spmd_partitioning(false);
AllReduceSimplifier simplifier(2);
EXPECT_FALSE(simplifier.Run(module.get()).value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_simplifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_simplifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
efb6078c-2e87-4404-afaf-515287b1e7eb | cpp | tensorflow/tensorflow | multiply | tensorflow/lite/experimental/shlo/ops/multiply.cc | tensorflow/lite/experimental/shlo/ops/multiply_test.cc | #include "tensorflow/lite/experimental/shlo/ops/multiply.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
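// Element-wise multiplication functor; the boolean specialization below
// implements logical AND.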
template <DataType expressed_type>
struct Multiply : std::multiplies<void> {};
template <>
struct Multiply<DataType::kI1> {
template <class T>
T operator()(const T& lhs, const T& rhs) const {
return static_cast<T>(lhs && rhs);
}
};
MultiplyOp Create(MultiplyOp::Attributes) { return {}; }
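// Propagates the operand shapes to the output and checks that lhs, rhs and
// output share a supported baseline element type (bool, int, float, or
// per-tensor quantized).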
absl::Status Prepare(MultiplyOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("multiply"), lhs, IsBoolTensor, IsIntTensor,
IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("multiply"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("multiply"), rhs, output));
return absl::OkStatus();
}
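// Dispatches on the operand element type: bool uses logical AND, int/float
// multiply directly, and per-tensor quantized tensors are dequantized,
// multiplied in the expressed type, and requantized.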
absl::Status Evaluate(MultiplyOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
if (IsBoolTensor(lhs)) {
detail::EvaluateNoQuantization<DataType::kI1>(Multiply<DataType::kI1>(),
lhs, rhs, output);
return absl::OkStatus();
} else if (IsIntTensor(lhs) || IsFloatTensor(lhs)) {
Multiply<DataType::kF32> multiply;
DISPATCH_INT_FLOAT(detail::EvaluateNoQuantization,
lhs.tensor_element_type(), multiply, lhs, rhs, output);
} else if (IsQuantizedPerTensorTensor(lhs)) {
Multiply<DataType::kF32> multiply;
DISPATCH_QUANTIZED(detail::DequantizeOpQuantizePerTensor,
lhs.quantized_per_tensor_element_type().StorageType(),
lhs.quantized_per_tensor_element_type().ExpressedType(),
multiply, lhs, rhs, output)
}
return absl::FailedPreconditionError(
"stablehlo.multiply: Unsupported tensor type.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/multiply.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::FloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<MultiplyOp> {
static std::string Get() { return "Multiply"; }
};
template <DataType expressed_type>
struct Multiply : std::multiplies<void> {};
template <>
struct Multiply<DataType::kI1> {
template <class T>
T operator()(const T& lhs, const T& rhs) const {
return static_cast<T>(lhs && rhs);
}
};
namespace {
INSTANTIATE_TYPED_TEST_SUITE_P(Multiply,
BinaryElementwiseOpShapePropagationTest,
MultiplyOp, TestParamNames);
using MultipyBaselineContraintTypes = BinaryElementwiseBaselineConstraintTypes<
MultiplyOp, ConcatTypes<BoolTestType, BaselineConstraintIntTypes,
BaselineConstraintFloatTypes,
BaselineConstraintQuantizedPerTensorTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(
Multiply, BinaryElementwiseSameBaselineElementTypeConstraintTest,
MultipyBaselineContraintTypes, TestParamNames);
using UnsupportedTypes = WithOpTypes<MultiplyOp, PerAxisQuantizedTestTypes>;
INSTANTIATE_TYPED_TEST_SUITE_P(Multiply, BinaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
using ArithmeticTypes = ConcatTypes<BoolTestType, ArithmeticTestTypes>;
template <class T>
struct MultiplyTest : ::testing::Test {};
TYPED_TEST_SUITE(MultiplyTest, ArithmeticTypes, TestParamNames);
TYPED_TEST(MultiplyTest, ArithmeticTestTypesTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> output_data(shape.NumElements());
Tensor lhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(lhs_data, rhs_data, expected_data.begin(),
Multiply<TypeParam::kStorage>());
auto op = Create(MultiplyOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
template <class T>
struct QuantizedMultiplyTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedMultiplyTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedMultiplyTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor lhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = rhs_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
lhs_data, rhs_data, expected_data.begin(),
[zero_point, scale](auto lhs, auto rhs) {
const ExpressedT dequantized_lhs = Dequantize(lhs, zero_point, scale);
const ExpressedT dequantized_rhs = Dequantize(rhs, zero_point, scale);
const ExpressedT dequantized_res =
Multiply<TypeParam::kExpressed>()(dequantized_lhs, dequantized_rhs);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(MultiplyOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/multiply.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/multiply_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e38f2614-c0c8-476a-930c-933b2ee50af8 | cpp | google/quiche | icmp_packet | quiche/quic/qbone/platform/icmp_packet.cc | quiche/quic/qbone/platform/icmp_packet_test.cc | #include "quiche/quic/qbone/platform/icmp_packet.h"
#include <netinet/ip6.h>
#include <algorithm>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/internet_checksum.h"
#include "quiche/common/quiche_callbacks.h"
#include "quiche/common/quiche_endian.h"
namespace quic {
namespace {
constexpr size_t kIPv6AddressSize = sizeof(in6_addr);
constexpr size_t kIPv6HeaderSize = sizeof(ip6_hdr);
constexpr size_t kICMPv6HeaderSize = sizeof(icmp6_hdr);
constexpr size_t kIPv6MinPacketSize = 1280;
constexpr size_t kIcmpTtl = 255;
constexpr size_t kICMPv6BodyMaxSize =
kIPv6MinPacketSize - kIPv6HeaderSize - kICMPv6HeaderSize;
struct ICMPv6Packet {
ip6_hdr ip_header;
icmp6_hdr icmp_header;
uint8_t body[kICMPv6BodyMaxSize];
};
struct IPv6PseudoHeader {
uint32_t payload_size{};
uint8_t zeros[3] = {0, 0, 0};
uint8_t next_header = IPPROTO_ICMPV6;
};
}
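// Serializes an IPv6/ICMPv6 packet: fills in the IPv6 header, copies the
// ICMPv6 header and (possibly truncated) body, computes the ICMPv6 checksum
// over the IPv6 pseudo-header, and passes the resulting bytes to `cb`.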
void CreateIcmpPacket(in6_addr src, in6_addr dst, const icmp6_hdr& icmp_header,
absl::string_view body,
quiche::UnretainedCallback<void(absl::string_view)> cb) {
const size_t body_size = std::min(body.size(), kICMPv6BodyMaxSize);
const size_t payload_size = kICMPv6HeaderSize + body_size;
ICMPv6Packet icmp_packet{};
icmp_packet.ip_header.ip6_vfc = 0x6 << 4;
icmp_packet.ip_header.ip6_plen =
quiche::QuicheEndian::HostToNet16(payload_size);
icmp_packet.ip_header.ip6_nxt = IPPROTO_ICMPV6;
icmp_packet.ip_header.ip6_hops = kIcmpTtl;
icmp_packet.ip_header.ip6_src = src;
icmp_packet.ip_header.ip6_dst = dst;
icmp_packet.icmp_header = icmp_header;
icmp_packet.icmp_header.icmp6_cksum = 0;
IPv6PseudoHeader pseudo_header{};
pseudo_header.payload_size = quiche::QuicheEndian::HostToNet32(payload_size);
InternetChecksum checksum;
checksum.Update(icmp_packet.ip_header.ip6_src.s6_addr, kIPv6AddressSize);
checksum.Update(icmp_packet.ip_header.ip6_dst.s6_addr, kIPv6AddressSize);
checksum.Update(reinterpret_cast<char*>(&pseudo_header),
sizeof(pseudo_header));
checksum.Update(reinterpret_cast<const char*>(&icmp_packet.icmp_header),
sizeof(icmp_packet.icmp_header));
checksum.Update(body.data(), body_size);
icmp_packet.icmp_header.icmp6_cksum = checksum.Value();
memcpy(icmp_packet.body, body.data(), body_size);
const char* packet = reinterpret_cast<char*>(&icmp_packet);
const size_t packet_size = offsetof(ICMPv6Packet, body) + body_size;
cb(absl::string_view(packet, packet_size));
}
} | #include "quiche/quic/qbone/platform/icmp_packet.h"
#include <netinet/ip6.h>
#include <cstdint>
#include "absl/strings/string_view.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/common/quiche_text_utils.h"
namespace quic {
namespace {
constexpr char kReferenceSourceAddress[] = "fe80:1:2:3:4::1";
constexpr char kReferenceDestinationAddress[] = "fe80:4:3:2:1::1";
constexpr uint8_t kReferenceICMPMessageBody[] {
0xd2, 0x61, 0x29, 0x5b, 0x00, 0x00, 0x00, 0x00,
0x0d, 0x59, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37
};
constexpr uint8_t kReferenceICMPPacket[] = {
0x60, 0x00, 0x00, 0x00,
0x00, 0x40,
0x3a,
0xFF,
0xfe, 0x80, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03,
0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0xfe, 0x80, 0x00, 0x04, 0x00, 0x03, 0x00, 0x02,
0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x80, 0x00,
0xec, 0x00,
0xcb, 0x82,
0x00, 0x01,
0xd2, 0x61, 0x29, 0x5b, 0x00, 0x00, 0x00, 0x00,
0x0d, 0x59, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37
};
}
TEST(IcmpPacketTest, CreatedPacketMatchesReference) {
QuicIpAddress src;
ASSERT_TRUE(src.FromString(kReferenceSourceAddress));
in6_addr src_addr;
memcpy(src_addr.s6_addr, src.ToPackedString().data(), sizeof(in6_addr));
QuicIpAddress dst;
ASSERT_TRUE(dst.FromString(kReferenceDestinationAddress));
in6_addr dst_addr;
memcpy(dst_addr.s6_addr, dst.ToPackedString().data(), sizeof(in6_addr));
icmp6_hdr icmp_header{};
icmp_header.icmp6_type = ICMP6_ECHO_REQUEST;
icmp_header.icmp6_id = 0x82cb;
icmp_header.icmp6_seq = 0x0100;
absl::string_view message_body = absl::string_view(
reinterpret_cast<const char*>(kReferenceICMPMessageBody), 56);
absl::string_view expected_packet = absl::string_view(
reinterpret_cast<const char*>(kReferenceICMPPacket), 104);
CreateIcmpPacket(src_addr, dst_addr, icmp_header, message_body,
[&expected_packet](absl::string_view packet) {
QUIC_LOG(INFO) << quiche::QuicheTextUtils::HexDump(packet);
ASSERT_EQ(packet, expected_packet);
});
}
TEST(IcmpPacketTest, NonZeroChecksumIsIgnored) {
QuicIpAddress src;
ASSERT_TRUE(src.FromString(kReferenceSourceAddress));
in6_addr src_addr;
memcpy(src_addr.s6_addr, src.ToPackedString().data(), sizeof(in6_addr));
QuicIpAddress dst;
ASSERT_TRUE(dst.FromString(kReferenceDestinationAddress));
in6_addr dst_addr;
memcpy(dst_addr.s6_addr, dst.ToPackedString().data(), sizeof(in6_addr));
icmp6_hdr icmp_header{};
icmp_header.icmp6_type = ICMP6_ECHO_REQUEST;
icmp_header.icmp6_id = 0x82cb;
icmp_header.icmp6_seq = 0x0100;
icmp_header.icmp6_cksum = 0x1234;
absl::string_view message_body = absl::string_view(
reinterpret_cast<const char*>(kReferenceICMPMessageBody), 56);
absl::string_view expected_packet = absl::string_view(
reinterpret_cast<const char*>(kReferenceICMPPacket), 104);
CreateIcmpPacket(src_addr, dst_addr, icmp_header, message_body,
[&expected_packet](absl::string_view packet) {
QUIC_LOG(INFO) << quiche::QuicheTextUtils::HexDump(packet);
ASSERT_EQ(packet, expected_packet);
});
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/platform/icmp_packet.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/platform/icmp_packet_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
65e114f5-b5fd-4148-9e57-cab65cb9be29 | cpp | tensorflow/tensorflow | eigen_activations | tensorflow/core/kernels/eigen_activations.h | tensorflow/core/kernels/eigen_activations_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_EIGEN_ACTIVATIONS_H_
#define TENSORFLOW_CORE_KERNELS_EIGEN_ACTIVATIONS_H_
#include "unsupported/Eigen/CXX11/Tensor"
namespace Eigen {
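// Fast sigmoid derivative expressed in terms of the sigmoid output y:
// d/dx sigmoid(x) = (1 - y) * y.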
template <typename T>
struct scalar_sigmoid_fast_derivative_op {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& y) const {
const T one = T(1);
return (one - y) * y;
}
template <typename Packet>
inline Packet packetOp(const Packet& y) const {
const Packet one = internal::pset1<Packet>(1);
return internal::pmul(internal::psub(one, y), y);
}
};
namespace internal {
template <typename T>
struct functor_traits<scalar_sigmoid_fast_derivative_op<T> > {
enum {
Cost = NumTraits<T>::AddCost * 2 + NumTraits<T>::MulCost,
PacketAccess = packet_traits<T>::HasAdd && packet_traits<T>::HasMul &&
packet_traits<T>::HasNegate
};
};
}
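// Fast tanh derivative expressed in terms of the tanh output y: 1 - y * y.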
template <typename T>
struct scalar_tanh_fast_derivative_op {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& y) const {
const T one = T(1);
return one - (y * y);
}
template <typename Packet>
inline Packet packetOp(const Packet& y) const {
const Packet one = internal::pset1<Packet>(1);
return internal::psub(one, internal::pmul(y, y));
}
};
namespace internal {
template <typename T>
struct functor_traits<scalar_tanh_fast_derivative_op<T> > {
enum {
Cost = NumTraits<T>::AddCost * 2 + NumTraits<T>::MulCost * 1,
PacketAccess = packet_traits<T>::HasAdd && packet_traits<T>::HasMul &&
packet_traits<T>::HasNegate
};
};
}
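// Clamps the first argument to the symmetric range [-b, b].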
template <typename Scalar>
struct scalar_clip_op {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar
operator()(const Scalar& a, const Scalar& b) const {
return numext::mini(numext::maxi(a, -b), b);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet
packetOp(const Packet& a, const Packet& b) const {
return internal::pmin(internal::pmax(a, internal::pnegate(b)), b);
}
};
namespace internal {
template <typename Scalar>
struct functor_traits<scalar_clip_op<Scalar> > {
enum {
Cost = NumTraits<Scalar>::AddCost * 3,
PacketAccess = packet_traits<Scalar>::HasMax &&
packet_traits<Scalar>::HasMin &&
packet_traits<Scalar>::HasNegate
};
};
}
}
#endif | #include "tensorflow/core/kernels/eigen_activations.h"
#include "tensorflow/core/platform/test.h"
namespace Eigen {
namespace {
void EigenApprox(float a, float b) {
ASSERT_TRUE(std::abs(a - b) <= std::min(std::abs(a), std::abs(b)) * 1e-3);
}
}
TEST(EigenBackwardSpatialConvolutionsTest, SigmoidFastDerivative) {
const ptrdiff_t depth = 3;
const ptrdiff_t batch = 10;
const ptrdiff_t rows = 32;
const ptrdiff_t cols = 48;
Tensor<float, 4> input(depth, rows, cols, batch);
input.setRandom();
Tensor<float, 4> result(depth, rows, cols, batch);
result = input.unaryExpr(scalar_sigmoid_fast_derivative_op<float>());
for (int b = 0; b < batch; ++b) {
for (int c = 0; c < cols; ++c) {
for (int r = 0; r < rows; ++r) {
for (int d = 0; d < depth; ++d) {
float val = input(d, r, c, b);
EigenApprox(result(d, r, c, b), (1 - val) * val);
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest, TanhFastDerivative) {
const ptrdiff_t depth = 3;
const ptrdiff_t batch = 10;
const ptrdiff_t rows = 32;
const ptrdiff_t cols = 48;
Tensor<float, 4> input(depth, rows, cols, batch);
input.setRandom();
Tensor<float, 4> result(depth, rows, cols, batch);
result = input.unaryExpr(scalar_tanh_fast_derivative_op<float>());
for (int b = 0; b < batch; ++b) {
for (int c = 0; c < cols; ++c) {
for (int r = 0; r < rows; ++r) {
for (int d = 0; d < depth; ++d) {
float val = input(d, r, c, b);
EigenApprox(result(d, r, c, b), 1 - (val * val));
}
}
}
}
}
TEST(EigenBackwardSpatialConvolutionsTest, Clip) {
const ptrdiff_t depth = 3;
const ptrdiff_t batch = 10;
const ptrdiff_t rows = 32;
const ptrdiff_t cols = 48;
Tensor<float, 4> input(depth, rows, cols, batch);
input.setRandom();
Tensor<float, 4> result(depth, rows, cols, batch);
result = input.binaryExpr(input.constant(0.01), scalar_clip_op<float>());
for (int b = 0; b < batch; ++b) {
for (int c = 0; c < cols; ++c) {
for (int r = 0; r < rows; ++r) {
for (int d = 0; d < depth; ++d) {
float val = input(d, r, c, b);
EigenApprox(result(d, r, c, b),
(std::min)((std::max)(val, -0.01f), 0.01f));
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/eigen_activations.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/eigen_activations_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
88c2c198-269a-40f5-b83c-69515f289132 | cpp | google/langsvr | block_allocator | src/utils/block_allocator.h | src/utils/block_allocator_test.cc | #ifndef SRC_LANGSVR_UTILS_BLOCK_ALLOCATOR_H_
#define SRC_LANGSVR_UTILS_BLOCK_ALLOCATOR_H_
#include <stdint.h>
#include <array>
#include <cstring>
#include <utility>
namespace langsvr {
template <typename T>
inline constexpr T RoundUp(T alignment, T value) {
return ((value + alignment - 1) / alignment) * alignment;
}
template <typename TO, typename FROM>
inline TO Bitcast(FROM&& from) {
static_assert(sizeof(FROM) == sizeof(TO));
static_assert(std::is_trivially_copyable_v<std::decay_t<FROM>>);
static_assert(std::is_trivially_copyable_v<std::decay_t<TO>>);
TO to;
memcpy(reinterpret_cast<std::byte*>(&to), reinterpret_cast<const std::byte*>(&from),
sizeof(TO));
return to;
}
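// Arena-style allocator: objects are constructed into BLOCK_SIZE-byte blocks,
// every allocated pointer is recorded so Objects() can iterate them, and all
// destructors run on Reset() or when the allocator is destroyed.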
template <typename T, size_t BLOCK_SIZE = 64 * 1024, size_t BLOCK_ALIGNMENT = 16>
class BlockAllocator {
struct Pointers {
static constexpr size_t kMax = 32;
std::array<T*, kMax> ptrs;
Pointers* next;
Pointers* prev;
size_t count;
};
struct alignas(BLOCK_ALIGNMENT) Block {
uint8_t data[BLOCK_SIZE];
Block* next = nullptr;
};
template <bool IS_CONST>
class TView;
template <bool IS_CONST>
class TIterator {
using PointerTy = std::conditional_t<IS_CONST, const T*, T*>;
public:
bool operator==(const TIterator& other) const {
return ptrs == other.ptrs && idx == other.idx;
}
bool operator!=(const TIterator& other) const { return !(*this == other); }
TIterator& operator++() {
if (ptrs != nullptr) {
++idx;
if (idx >= ptrs->count) {
idx = 0;
ptrs = ptrs->next;
}
}
return *this;
}
TIterator& operator--() {
if (ptrs != nullptr) {
if (idx == 0) {
ptrs = ptrs->prev;
idx = ptrs->count - 1;
}
--idx;
}
return *this;
}
PointerTy operator*() const { return ptrs->ptrs[idx]; }
private:
friend TView<IS_CONST>;
explicit TIterator(const Pointers* p, size_t i) : ptrs(p), idx(i) {}
const Pointers* ptrs = nullptr;
size_t idx = 0;
};
template <bool IS_CONST>
class TView {
public:
TIterator<IS_CONST> begin() const {
return TIterator<IS_CONST>{allocator_->data.pointers.root, 0};
}
TIterator<IS_CONST> end() const { return TIterator<IS_CONST>{nullptr, 0}; }
private:
friend BlockAllocator;
explicit TView(BlockAllocator const* allocator) : allocator_(allocator) {}
BlockAllocator const* const allocator_;
};
public:
    using Iterator = TIterator</*IS_CONST=*/false>;
    using ConstIterator = TIterator</*IS_CONST=*/true>;
using View = TView<false>;
using ConstView = TView<true>;
BlockAllocator() = default;
BlockAllocator(BlockAllocator&& rhs) { std::swap(data, rhs.data); }
BlockAllocator& operator=(BlockAllocator&& rhs) {
if (this != &rhs) {
Reset();
std::swap(data, rhs.data);
}
return *this;
}
~BlockAllocator() { Reset(); }
View Objects() { return View(this); }
ConstView Objects() const { return ConstView(this); }
template <typename TYPE = T, typename... ARGS>
TYPE* Create(ARGS&&... args) {
static_assert(std::is_same<T, TYPE>::value || std::is_base_of<T, TYPE>::value,
"TYPE does not derive from T");
static_assert(std::is_same<T, TYPE>::value || std::has_virtual_destructor<T>::value,
"TYPE requires a virtual destructor when calling Create() for a type "
"that is not T");
auto* ptr = Allocate<TYPE>();
new (ptr) TYPE(std::forward<ARGS>(args)...);
AddObjectPointer(ptr);
data.count++;
return ptr;
}
void Reset() {
for (auto ptr : Objects()) {
ptr->~T();
}
auto* block = data.block.root;
while (block != nullptr) {
auto* next = block->next;
delete block;
block = next;
}
data = {};
}
size_t Count() const { return data.count; }
private:
BlockAllocator(const BlockAllocator&) = delete;
BlockAllocator& operator=(const BlockAllocator&) = delete;
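    // Bump-allocates aligned storage for TYPE from the current block,
    // starting a new block when the remaining space is insufficient.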
template <typename TYPE>
TYPE* Allocate() {
static_assert(sizeof(TYPE) <= BLOCK_SIZE,
"Cannot construct TYPE with size greater than BLOCK_SIZE");
static_assert(alignof(TYPE) <= BLOCK_ALIGNMENT, "alignof(TYPE) is greater than ALIGNMENT");
auto& block = data.block;
block.current_offset = RoundUp(alignof(TYPE), block.current_offset);
if (block.current_offset + sizeof(TYPE) > BLOCK_SIZE) {
auto* prev_block = block.current;
block.current = new Block;
if (!block.current) {
return nullptr;
}
block.current->next = nullptr;
block.current_offset = 0;
if (prev_block) {
prev_block->next = block.current;
} else {
block.root = block.current;
}
}
auto* base = &block.current->data[0];
auto* ptr = Bitcast<TYPE*>(base + block.current_offset);
block.current_offset += sizeof(TYPE);
return ptr;
}
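    // Records `ptr` for iteration via Objects(), growing the doubly-linked
    // list of Pointers nodes (themselves allocated from the blocks) when the
    // current node is full.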
void AddObjectPointer(T* ptr) {
auto& pointers = data.pointers;
if (!pointers.current || pointers.current->count == Pointers::kMax) {
auto* prev_pointers = pointers.current;
pointers.current = Allocate<Pointers>();
if (!pointers.current) {
return;
}
pointers.current->next = nullptr;
pointers.current->prev = prev_pointers;
pointers.current->count = 0;
if (prev_pointers) {
prev_pointers->next = pointers.current;
} else {
pointers.root = pointers.current;
}
}
pointers.current->ptrs[pointers.current->count++] = ptr;
}
struct {
struct {
Block* root = nullptr;
Block* current = nullptr;
size_t current_offset = BLOCK_SIZE;
} block;
struct {
Pointers* root = nullptr;
Pointers* current = nullptr;
} pointers;
size_t count = 0;
} data;
};
}
#endif | #include "src/utils/block_allocator.h"
#include <vector>
#include "gtest/gtest.h"
namespace langsvr {
namespace {
struct LifetimeCounter {
explicit LifetimeCounter(size_t* count) : count_(count) { (*count)++; }
~LifetimeCounter() { (*count_)--; }
size_t* const count_;
};
using BlockAllocatorTest = testing::Test;
TEST_F(BlockAllocatorTest, Empty) {
using Allocator = BlockAllocator<int>;
Allocator allocator;
EXPECT_EQ(allocator.Count(), 0u);
for (int* i : allocator.Objects()) {
(void)i;
if ((true)) {
FAIL() << "BlockAllocator should be empty";
}
}
for (const int* i : static_cast<const Allocator&>(allocator).Objects()) {
(void)i;
if ((true)) {
FAIL() << "BlockAllocator should be empty";
}
}
}
TEST_F(BlockAllocatorTest, Count) {
using Allocator = BlockAllocator<int>;
for (size_t n : {0u, 1u, 10u, 16u, 20u, 32u, 50u, 64u, 100u, 256u, 300u, 512u, 500u, 512u}) {
Allocator allocator;
EXPECT_EQ(allocator.Count(), 0u);
for (size_t i = 0; i < n; i++) {
allocator.Create(123);
}
EXPECT_EQ(allocator.Count(), n);
}
}
TEST_F(BlockAllocatorTest, ObjectLifetime) {
using Allocator = BlockAllocator<LifetimeCounter>;
size_t count = 0;
{
Allocator allocator;
EXPECT_EQ(count, 0u);
allocator.Create(&count);
EXPECT_EQ(count, 1u);
allocator.Create(&count);
EXPECT_EQ(count, 2u);
allocator.Create(&count);
EXPECT_EQ(count, 3u);
}
EXPECT_EQ(count, 0u);
}
TEST_F(BlockAllocatorTest, MoveConstruct) {
using Allocator = BlockAllocator<LifetimeCounter>;
for (size_t n : {0u, 1u, 10u, 16u, 20u, 32u, 50u, 64u, 100u, 256u, 300u, 512u, 500u, 512u}) {
size_t count = 0;
{
Allocator allocator_a;
for (size_t i = 0; i < n; i++) {
allocator_a.Create(&count);
}
EXPECT_EQ(count, n);
EXPECT_EQ(allocator_a.Count(), n);
Allocator allocator_b{std::move(allocator_a)};
EXPECT_EQ(count, n);
EXPECT_EQ(allocator_b.Count(), n);
}
EXPECT_EQ(count, 0u);
}
}
TEST_F(BlockAllocatorTest, MoveAssign) {
using Allocator = BlockAllocator<LifetimeCounter>;
for (size_t n : {0u, 1u, 10u, 16u, 20u, 32u, 50u, 64u, 100u, 256u, 300u, 512u, 500u, 512u}) {
size_t count_a = 0;
size_t count_b = 0;
{
Allocator allocator_a;
for (size_t i = 0; i < n; i++) {
allocator_a.Create(&count_a);
}
EXPECT_EQ(count_a, n);
EXPECT_EQ(allocator_a.Count(), n);
Allocator allocator_b;
for (size_t i = 0; i < n; i++) {
allocator_b.Create(&count_b);
}
EXPECT_EQ(count_b, n);
EXPECT_EQ(allocator_b.Count(), n);
allocator_b = std::move(allocator_a);
EXPECT_EQ(count_a, n);
EXPECT_EQ(count_b, 0u);
EXPECT_EQ(allocator_b.Count(), n);
}
EXPECT_EQ(count_a, 0u);
EXPECT_EQ(count_b, 0u);
}
}
TEST_F(BlockAllocatorTest, ObjectOrder) {
using Allocator = BlockAllocator<int>;
Allocator allocator;
constexpr int N = 10000;
for (int i = 0; i < N; i++) {
allocator.Create(i);
}
{
int i = 0;
for (int* p : allocator.Objects()) {
EXPECT_EQ(*p, i);
i++;
}
EXPECT_EQ(i, N);
}
{
int i = 0;
for (const int* p : static_cast<const Allocator&>(allocator).Objects()) {
EXPECT_EQ(*p, i);
i++;
}
EXPECT_EQ(i, N);
}
}
TEST_F(BlockAllocatorTest, AddWhileIterating) {
using Allocator = BlockAllocator<size_t>;
Allocator allocator;
for (int i = 0; i < 20; i++) {
allocator.Create(allocator.Count());
std::vector<size_t*> seen;
for (auto* j : allocator.Objects()) {
if (*j % 3 == 0) {
allocator.Create(allocator.Count());
}
seen.push_back(j);
}
size_t n = 0;
for (auto* obj : allocator.Objects()) {
ASSERT_TRUE(n < seen.size());
EXPECT_EQ(seen[n++], obj);
}
}
}
}
} | https://github.com/google/langsvr/blob/303c526231a90049a3e384549720f3fbd453cf66/src/utils/block_allocator.h | https://github.com/google/langsvr/blob/303c526231a90049a3e384549720f3fbd453cf66/src/utils/block_allocator_test.cc | 303c526231a90049a3e384549720f3fbd453cf66 |
7f3aca48-7578-4d48-bd73-bdc90e971678 | cpp | tensorflow/tensorflow | conv3d_transpose | tensorflow/lite/kernels/conv3d_transpose.cc | tensorflow/lite/kernels/conv3d_transpose_test.cc | #include "tensorflow/lite/kernels/internal/reference/conv3d_transpose.h"
#include <cstddef>
#include <cstdint>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/padding.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace conv3d_transpose {
enum KernelType {
kReference,
kGenericOptimized,
};
const int kTensorNotAllocated = -1;
struct OpData {
Padding3DValues padding;
int col2im_id = kTensorNotAllocated;
int col2im_index;
bool need_col2im = false;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* opdata = new OpData;
return opdata;
}
void Free(TfLiteContext* context, void* buffer) {
delete static_cast<OpData*>(buffer);
}
static TfLiteStatus AllocateTemporaryTensorsIfRequired(TfLiteContext* context,
TfLiteNode* node,
KernelType kernel_type) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
int temporaries_count = 0;
if (kernel_type == kGenericOptimized) {
if (data->col2im_id == kTensorNotAllocated) {
context->AddTensors(context, 1, &data->col2im_id);
}
data->col2im_index = temporaries_count++;
data->need_col2im = true;
}
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(temporaries_count);
return kTfLiteOk;
}
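// Validates the requested output shape against the input and filter, computes
// the padding values, and resizes the output tensor (plus the col2im scratch
// tensor when the optimized kernel is used).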
TfLiteStatus ResizeOutputAndTemporaryTensors(
TfLiteContext* context, OpData* opdata, TfLiteConv3DTransposeParams* params,
const TfLiteTensor* shape_tensor, const TfLiteTensor* filter,
const TfLiteTensor* input, TfLiteTensor* col2im, TfLiteTensor* output) {
auto shape_data = GetTensorData<int32_t>(shape_tensor);
TF_LITE_ENSURE_EQ(context, shape_data[0], SizeOfDimension(input, 0));
TF_LITE_ENSURE_EQ(context, shape_data[4] % SizeOfDimension(filter, 3), 0);
const RuntimeShape& filter_shape = GetTensorShape(filter);
const int depth = shape_data[1];
const int height = shape_data[2];
const int width = shape_data[3];
const int filter_depth = filter_shape.Dims(0);
const int filter_height = filter_shape.Dims(1);
const int filter_width = filter_shape.Dims(2);
int unused_out_width, unused_out_height, unused_out_depth;
opdata->padding = ComputePadding3DValues(
params->stride_height, params->stride_width, params->stride_depth,
params->dilation_height_factor, params->dilation_width_factor,
params->dilation_depth_factor, height, width, depth, filter_height,
filter_width, filter_depth, params->padding, &unused_out_height,
&unused_out_width, &unused_out_depth);
TF_LITE_ENSURE_EQ(context, unused_out_depth, SizeOfDimension(input, 1));
TF_LITE_ENSURE_EQ(context, unused_out_height, SizeOfDimension(input, 2));
TF_LITE_ENSURE_EQ(context, unused_out_width, SizeOfDimension(input, 3));
TfLiteIntArray* output_shape =
TfLiteIntArrayCreate(NumElements(shape_tensor));
for (int i = 0; i < output_shape->size; ++i) {
output_shape->data[i] = GetTensorData<int32_t>(shape_tensor)[i];
}
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_shape));
if (opdata->need_col2im) {
TfLiteIntArray* col2im_shape_array = TfLiteIntArrayCreate(2);
const RuntimeShape& input_shape = GetTensorShape(input);
col2im_shape_array->data[0] =
input_shape.Dims(1) * input_shape.Dims(2) * input_shape.Dims(3);
col2im_shape_array->data[1] =
filter_depth * filter_height * filter_width * filter_shape.Dims(3);
col2im->type = kTfLiteFloat32;
col2im->allocation_type = kTfLiteDynamic;
return context->ResizeTensor(context, col2im, col2im_shape_array);
}
return kTfLiteOk;
}
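// Checks tensor ranks and types, allocates the col2im temporary for the
// optimized kernel, and resizes the output now if the output-shape tensor is
// constant (otherwise the output is marked dynamic).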
TfLiteStatus Prepare(KernelType kernel_type, TfLiteContext* context,
TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteConv3DTransposeParams*>(node->builtin_data);
OpData* opdata = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE(context, node->inputs->size == 3 || node->inputs->size == 4);
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* output_shape;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &output_shape));
const TfLiteTensor* filter;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &filter));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &input));
TF_LITE_ENSURE_EQ(context, output_shape->dims->size, 1);
TF_LITE_ENSURE_EQ(context, NumElements(output_shape), 5);
TF_LITE_ENSURE_EQ(context, input->dims->size, 5);
TF_LITE_ENSURE_EQ(context, filter->dims->size, 5);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(input, 4),
SizeOfDimension(filter, 4));
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, filter->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type);
TF_LITE_ENSURE_TYPES_EQ(context, output_shape->type, kTfLiteInt32);
const TfLiteTensor* bias = GetInput(context, node, 3);
if (bias) {
TF_LITE_ENSURE_TYPES_EQ(context, bias->type, input->type);
TF_LITE_ENSURE_EQ(context, NumElements(bias), SizeOfDimension(filter, 3));
}
if (params->dilation_depth_factor > 1 || params->dilation_height_factor > 1 ||
params->dilation_width_factor > 1) {
kernel_type = kReference;
}
TF_LITE_ENSURE_STATUS(
AllocateTemporaryTensorsIfRequired(context, node, kernel_type));
TfLiteTensor* col2im = nullptr;
if (opdata->need_col2im) {
node->temporaries->data[opdata->col2im_index] = opdata->col2im_id;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node,
opdata->col2im_index, &col2im));
}
if (!IsConstantOrPersistentTensor(output_shape)) {
SetTensorToDynamic(output);
if (opdata->need_col2im) {
SetTensorToDynamic(col2im);
}
} else {
TF_LITE_ENSURE_STATUS(ResizeOutputAndTemporaryTensors(
context, opdata, params, output_shape, filter, input, col2im, output));
}
return kTfLiteOk;
}
template <KernelType kernel_type>
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
return Prepare(kernel_type, context, node);
}
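// Fills the runtime Conv3DTransposeParams and invokes either the reference or
// the optimized float kernel.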
void EvalFloat(KernelType kernel_type, TfLiteContext* context, TfLiteNode* node,
TfLiteConv3DTransposeParams* params, OpData* opdata,
const TfLiteTensor* input, const TfLiteTensor* filter,
const TfLiteTensor* bias, TfLiteTensor* col2im,
TfLiteTensor* output) {
float output_activation_min, output_activation_max;
CalculateActivationRange(params->activation, &output_activation_min,
&output_activation_max);
Conv3DTransposeParams runtime_params;
runtime_params.padding_values = opdata->padding;
runtime_params.stride_depth = params->stride_depth;
runtime_params.stride_height = params->stride_height;
runtime_params.stride_width = params->stride_width;
runtime_params.dilation_depth = params->dilation_depth_factor;
runtime_params.dilation_height = params->dilation_height_factor;
runtime_params.dilation_width = params->dilation_width_factor;
runtime_params.float_activation_min = output_activation_min;
runtime_params.float_activation_max = output_activation_max;
switch (kernel_type) {
case kReference: {
reference_ops::Conv3DTranspose(
runtime_params, GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(filter), GetTensorData<float>(filter),
GetTensorShape(bias), GetTensorData<float>(bias),
GetTensorShape(output), GetTensorData<float>(output));
break;
}
case kGenericOptimized: {
optimized_ops::Conv3DTranspose(
runtime_params, GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(filter), GetTensorData<float>(filter),
GetTensorShape(bias), GetTensorData<float>(bias),
GetTensorShape(output), GetTensorData<float>(output),
GetTensorShape(col2im), GetTensorData<float>(col2im),
CpuBackendContext::GetFromContext(context));
} break;
}
}
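// Resizes dynamic outputs, falls back to the reference kernel when any
// dilation factor is greater than one, and dispatches on the input type
// (only float32 is supported).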
TfLiteStatus Eval(KernelType kernel_type, TfLiteContext* context,
TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteConv3DTransposeParams*>(node->builtin_data);
OpData* opdata = reinterpret_cast<OpData*>(node->user_data);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* output_shape;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &output_shape));
const TfLiteTensor* filter;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &filter));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &input));
const TfLiteTensor* bias = GetInput(context, node, 3);
TfLiteTensor* col2im = opdata->need_col2im
? GetTemporary(context, node, opdata->col2im_index)
: nullptr;
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context, ResizeOutputAndTemporaryTensors(
context, opdata, params, output_shape,
filter, input, col2im, output));
}
if (params->dilation_depth_factor > 1 || params->dilation_height_factor > 1 ||
params->dilation_width_factor > 1) {
kernel_type = kReference;
}
switch (input->type) {
case kTfLiteFloat32:
EvalFloat(kernel_type, context, node, params, opdata, input, filter, bias,
col2im, output);
break;
default:
TF_LITE_KERNEL_LOG(context, "Type %s currently not supported.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
return kTfLiteOk;
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
return Eval(kernel_type, context, node);
}
}
TfLiteRegistration* Register_CONV_3D_TRANSPOSE_REF() {
static TfLiteRegistration r = {
conv3d_transpose::Init, conv3d_transpose::Free,
conv3d_transpose::Prepare<conv3d_transpose::kReference>,
conv3d_transpose::Eval<conv3d_transpose::kReference>};
return &r;
}
TfLiteRegistration* Register_CONV_3D_TRANSPOSE_GENERIC_OPT() {
static TfLiteRegistration r = {
conv3d_transpose::Init, conv3d_transpose::Free,
conv3d_transpose::Prepare<conv3d_transpose::kGenericOptimized>,
conv3d_transpose::Eval<conv3d_transpose::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_CONV_3D_TRANSPOSE() {
return Register_CONV_3D_TRANSPOSE_GENERIC_OPT();
}
}
}
} | #include <cstdint>
#include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
enum class TestType {
kConst = 0,
kDynamic = 1,
};
class Conv3dTransposeOpModel : public SingleOpModel {
public:
Conv3dTransposeOpModel(
std::initializer_list<int> output_shape_data, const TensorData& filter,
const TensorData& input, const TensorData& bias, const TensorData& output,
TestType test_type, Padding padding = Padding_VALID,
int32_t stride_depth = 1, int32_t stride_width = 1,
int32_t stride_height = 1,
ActivationFunctionType activation = ActivationFunctionType_NONE,
int32_t dilation_depth = 1, int32_t dilation_width = 1,
int32_t dilation_height = 1) {
if (test_type == TestType::kDynamic) {
output_shape_ = AddInput({TensorType_INT32, {5}});
} else {
output_shape_ = AddConstInput(TensorType_INT32, output_shape_data, {5});
}
filter_ = AddInput(filter);
input_ = AddInput(input);
bias_ = AddInput(bias);
output_ = AddOutput(output);
SetBuiltinOp(
BuiltinOperator_CONV_3D_TRANSPOSE, BuiltinOptions_Conv3DOptions,
CreateConv3DOptions(builder_, padding, stride_depth, stride_width,
stride_height, activation, dilation_depth,
dilation_width, dilation_height)
.Union());
BuildInterpreter({GetShape(output_shape_), GetShape(filter_),
GetShape(input_), GetShape(bias_)});
if (test_type == TestType::kDynamic) {
PopulateTensor(output_shape_, output_shape_data);
}
}
Conv3dTransposeOpModel(
std::initializer_list<int> output_shape_data, const TensorData& filter,
const TensorData& input, const TensorData& output, TestType test_type,
Padding padding = Padding_VALID, int32_t stride_depth = 1,
int32_t stride_width = 1, int32_t stride_height = 1,
ActivationFunctionType activation = ActivationFunctionType_NONE,
int32_t dilation_depth = 1, int32_t dilation_width = 1,
int32_t dilation_height = 1) {
if (test_type == TestType::kDynamic) {
output_shape_ = AddInput({TensorType_INT32, {5}});
} else {
output_shape_ = AddConstInput(TensorType_INT32, output_shape_data, {5});
}
filter_ = AddInput(filter);
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(
BuiltinOperator_CONV_3D_TRANSPOSE, BuiltinOptions_Conv3DOptions,
CreateConv3DOptions(builder_, padding, stride_depth, stride_width,
stride_height, activation, dilation_depth,
dilation_width, dilation_height)
.Union());
BuildInterpreter(
{GetShape(output_shape_), GetShape(filter_), GetShape(input_)});
if (test_type == TestType::kDynamic) {
PopulateTensor(output_shape_, output_shape_data);
}
}
void SetFilter(std::vector<float> f) { PopulateTensor(filter_, f); }
void SetBias(std::initializer_list<float> f) { PopulateTensor(bias_, f); }
void SetInput(std::vector<float> data) { PopulateTensor(input_, data); }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int output_shape_;
int input_;
int filter_;
int bias_;
int output_;
};
template <typename T>
std::vector<T> CreateRangeVector(int N) {
std::vector<T> result;
for (int i = 0; i < N; ++i) result.push_back(i);
return result;
}
class Conv3dTransposeOpTest : public ::testing::TestWithParam<TestType> {};
TEST_P(Conv3dTransposeOpTest, InvalidInputDimsTest) {
EXPECT_DEATH_IF_SUPPORTED(
Conv3dTransposeOpModel m(
{1, 2, 3, 4, 5}, {TensorType_FLOAT32, {2, 2, 4, 1}},
{TensorType_FLOAT32, {3, 2, 2, 1}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam()),
"input->dims->size != 5");
}
TEST_P(Conv3dTransposeOpTest, InvalidFilterDimsTest) {
EXPECT_DEATH_IF_SUPPORTED(
Conv3dTransposeOpModel m(
{1, 2, 3, 4, 5}, {TensorType_FLOAT32, {2, 2, 4, 1}},
{TensorType_FLOAT32, {1, 3, 2, 2, 1}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam()),
"filter->dims->size != 5");
}
TEST_P(Conv3dTransposeOpTest, MismatchChannelSizeTest) {
EXPECT_DEATH_IF_SUPPORTED(
Conv3dTransposeOpModel m(
{1, 2, 3, 4, 5}, {TensorType_FLOAT32, {1, 2, 2, 4, 1}},
{TensorType_FLOAT32, {1, 3, 2, 2, 2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam()),
"SizeOfDimension.input, 4. != SizeOfDimension.filter, 4.");
}
TEST_P(Conv3dTransposeOpTest, MismatchBiasSizeTest) {
EXPECT_DEATH_IF_SUPPORTED(
Conv3dTransposeOpModel m(
{1, 2, 3, 4, 5}, {TensorType_FLOAT32, {1, 3, 2, 2, 2}},
{TensorType_FLOAT32, {1, 2, 2, 4, 2}}, {TensorType_FLOAT32, {3}},
{TensorType_FLOAT32, {}}, Conv3dTransposeOpTest::GetParam()),
"NumElements.bias. != SizeOfDimension.filter, 3.");
}
TEST_P(Conv3dTransposeOpTest, SimpleFloat32Test) {
Conv3dTransposeOpModel m(
{1, 3, 3, 5, 2}, {TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {1, 2, 2, 4, 2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam());
m.SetInput(CreateRangeVector<float>(32));
m.SetFilter({-1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1,
1, -1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, -1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(1, 3, 3, 5, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{-1, -1, -4, -4, -8, -8, -12, -12, 1, 1, -16, -16, -18,
-16, -18, -20, -18, -24, 14, -12, 1, 17, 18, 4, 22, 4,
26, 4, 29, -29, -34, -32, -36, -30, -36, -30, -36, -30, 14,
2, -50, 2, -8, -26, -8, -26, -8, -26, 74, -44, -16, 50,
28, 4, 28, 4, 28, 4, 60, -62, -1, 33, 32, 38, 36,
42, 40, 46, 45, 1, -34, 50, 10, 54, 10, 58, 10, 62,
60, 0, -49, 1, -54, 0, -58, 0, -62, 0, -1, -1}));
}
TEST_P(Conv3dTransposeOpTest, PaddingValidTest) {
Conv3dTransposeOpModel m(
{1, 4, 5, 6, 2}, {TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {1, 3, 4, 5, 2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam());
m.SetInput(CreateRangeVector<float>(120));
m.SetFilter({-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1,
1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(1, 4, 5, 6, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{-1, -1, -6, -6, -14, -14, -22, -22, -30, -30, -17,
-17, -22, -20, -50, -46, -58, -58, -66, -70, -74, -82,
-20, -54, -62, -40, -90, -106, -98, -118, -106, -130, -114,
-142, -20, -94, -102, -60, -130, -166, -138, -178, -146, -190,
-154, -202, -20, -134, -61, 1, -4, -60, -4, -64, -4,
-68, -4, -72, 77, -77, -80, -80, -160, -164, -164, -172,
-168, -180, -172, -188, -96, -96, -162, -98, -188, -282, -196,
-290, -204, -298, -212, -306, -18, -196, -202, -118, -228, -322,
-236, -330, -244, -338, -252, -346, -18, -216, -242, -138, -268,
-362, -276, -370, -284, -378, -292, -386, -18, -236, -202, 2,
-68, -78, -72, -78, -76, -78, -80, -78, 158, -80, -80,
-160, -240, -324, -244, -332, -248, -340, -252, -348, -176, -176,
-322, -178, -348, -442, -356, -450, -364, -458, -372, -466, -18,
-276, -362, -198, -388, -482, -396, -490, -404, -498, -412, -506,
-18, -296, -402, -218, -428, -522, -436, -530, -444, -538, -452,
-546, -18, -316, -362, 2, -148, -78, -152, -78, -156, -78,
-160, -78, 238, -80, 161, 1, 166, 2, 170, 2, 174,
2, 178, 2, 1, 1, 20, 2, 22, 164, 22, 168,
22, 172, 22, 176, 2, 178, 20, 2, 22, 184, 22,
188, 22, 192, 22, 196, 2, 198, 20, 2, 22, 204,
22, 208, 22, 212, 22, 216, 2, 218, -221, 1, -224,
222, -228, 226, -232, 230, -236, 234, 1, 237}));
}
TEST_P(Conv3dTransposeOpTest, PaddingSameTest) {
Conv3dTransposeOpModel m(
{1, 3, 4, 5, 2}, {TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {1, 3, 4, 5, 2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam(), Padding_SAME);
m.SetInput(CreateRangeVector<float>(120));
m.SetFilter({1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1,
-1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(1, 3, 4, 5, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{-1, -1, -2, 0, -2, 0, -2, 0, -2, 0, -2, 0, -4, 2,
-4, 2, -4, 2, -4, 2, -2, 0, -4, 2, -4, 2, -4, 2,
-4, 2, -2, 0, -4, 2, -4, 2, -4, 2, -4, 2, 0, 0,
-2, 2, -6, 2, -10, 2, -14, 2, 0, 2, -18, 10, -18, 14,
-18, 18, -18, 22, 20, 22, -18, 30, -18, 34, -18, 38, -18, 42,
40, 42, -18, 50, -18, 54, -18, 58, -18, 62, 0, 0, -82, 2,
-86, 2, -90, 2, -94, 2, 80, 82, -18, 90, -18, 94, -18, 98,
-18, 102, 100, 102, -18, 110, -18, 114, -18, 118, -18, 122, 120, 122,
-18, 130, -18, 134, -18, 138, -18, 142}));
}
TEST_P(Conv3dTransposeOpTest, PaddingValidComplexTest) {
Conv3dTransposeOpModel m(
{2, 4, 3, 2, 2}, {TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {2, 3, 2, 1, 2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam(), Padding_VALID);
m.SetInput(CreateRangeVector<float>(24));
m.SetFilter({1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1,
1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(2, 4, 3, 2, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{-1, 1, 1, -1, -2, 4, 2, 0, -1, -5, 1, 5, -2, 10, 2, -2,
-4, 8, 4, 8, -2, -18, 2, 18, -2, 26, 2, -2, -4, 8, 4, 24,
-2, -34, 2, 34, -1, 17, 1, -1, -2, 4, 2, 16, -1, -21, 1, 21,
-1, 25, 1, -1, -2, 4, 2, 24, -1, -29, 1, 29, -2, 58, 2, -2,
-4, 8, 4, 56, -2, -66, 2, 66, -2, 74, 2, -2, -4, 8, 4, 72,
-2, -82, 2, 82, -1, 41, 1, -1, -2, 4, 2, 40, -1, -45, 1, 45}));
}
TEST_P(Conv3dTransposeOpTest, StrideTest) {
Conv3dTransposeOpModel m(
{2, 4, 3, 2, 2}, {TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {2, 2, 2, 1, 2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam(), Padding_VALID,
2,
1, 1);
m.SetInput(CreateRangeVector<float>(16));
m.SetFilter({1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1,
1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(2, 4, 3, 2, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{-1, 1, 1, -1, -2, 4, 2, 0, -1, -5, 1, 5, -1, 1, 1, -1,
-2, 4, 2, 0, -1, -5, 1, 5, -1, 9, 1, -1, -2, 4, 2, 8,
-1, -13, 1, 13, -1, 9, 1, -1, -2, 4, 2, 8, -1, -13, 1, 13,
-1, 17, 1, -1, -2, 4, 2, 16, -1, -21, 1, 21, -1, 17, 1, -1,
-2, 4, 2, 16, -1, -21, 1, 21, -1, 25, 1, -1, -2, 4, 2, 24,
-1, -29, 1, 29, -1, 25, 1, -1, -2, 4, 2, 24, -1, -29, 1, 29}));
}
TEST_P(Conv3dTransposeOpTest, StrideAndPaddingSameTest) {
Conv3dTransposeOpModel m(
{2, 4, 2, 1, 2}, {TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {2, 2, 2, 1, 2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam(), Padding_SAME,
2,
1, 1);
m.SetInput(CreateRangeVector<float>(16));
m.SetFilter({1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1,
1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(2, 4, 2, 1, 2));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({-1, 1, -2, 4, -1, 1, -2, 4, -1, 9, -2,
4, -1, 9, -2, 4, -1, 17, -2, 4, -1, 17,
-2, 4, -1, 25, -2, 4, -1, 25, -2, 4}));
}
TEST_P(Conv3dTransposeOpTest, DilationTest) {
Conv3dTransposeOpModel m(
{1, 3, 3, 2, 2}, {TensorType_FLOAT32, {1, 2, 2, 2, 1}},
{TensorType_FLOAT32, {1, 3, 1, 1, 1}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam(), Padding_VALID,
1,
1, 1,
ActivationFunctionType_NONE,
1, 1,
2);
m.SetInput(CreateRangeVector<float>(3));
m.SetFilter({1, -1, 1, 1, -1, 1, 1, -1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(1, 3, 3, 2, 2));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, -1, 1, 1, 0, 0, 0, 0, -1, 1, 1, -1,
2, -2, 2, 2, 0, 0, 0, 0, -2, 2, 2, -2}));
}
TEST_P(Conv3dTransposeOpTest, BiasTest) {
Conv3dTransposeOpModel m({2, 4, 3, 2, 2},
{TensorType_FLOAT32, {2, 2, 2, 2, 2}},
{TensorType_FLOAT32, {2, 3, 2, 1, 2}},
{TensorType_FLOAT32, {2}}, {TensorType_FLOAT32, {}},
Conv3dTransposeOpTest::GetParam(), Padding_VALID);
m.SetInput(CreateRangeVector<float>(24));
m.SetFilter({1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1,
1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1});
m.SetBias({1, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAre(2, 4, 3, 2, 2));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{0, 3, 2, 1, -1, 6, 3, 2, 0, -3, 2, 7, -1, 12, 3, 0,
-3, 10, 5, 10, -1, -16, 3, 20, -1, 28, 3, 0, -3, 10, 5, 26,
-1, -32, 3, 36, 0, 19, 2, 1, -1, 6, 3, 18, 0, -19, 2, 23,
0, 27, 2, 1, -1, 6, 3, 26, 0, -27, 2, 31, -1, 60, 3, 0,
-3, 10, 5, 58, -1, -64, 3, 68, -1, 76, 3, 0, -3, 10, 5, 74,
-1, -80, 3, 84, 0, 43, 2, 1, -1, 6, 3, 42, 0, -43, 2, 47}));
}
INSTANTIATE_TEST_SUITE_P(Conv3dTransposeOpTest, Conv3dTransposeOpTest,
::testing::Values(TestType::kConst,
TestType::kDynamic));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/conv3d_transpose.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/conv3d_transpose_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ab71cfba-e3ad-4152-9ff5-0fff1613d9b5 | cpp | tensorflow/tensorflow | feature_util | tensorflow/core/example/feature_util.cc | tensorflow/core/example/feature_util_test.cc | #include "tensorflow/core/example/feature_util.h"
#include <string>
#include "absl/strings/string_view.h"
namespace tensorflow {
namespace internal {
Feature& ExampleFeature(absl::string_view name, Example* example) {
return *GetFeature(name, example);
}
}  // namespace internal
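// Typed HasFeature specializations: the untyped overload below only checks
// that the key exists, while the typed overloads also require the stored kind
// to match the requested value type (int64_list, float_list, or bytes_list).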
template <>
bool HasFeature<>(absl::string_view key, const Features& features) {
return features.feature().contains(internal::ProtoMapKey(key));
}
template <>
bool HasFeature<protobuf_int64>(absl::string_view key,
const Features& features) {
auto it = features.feature().find(internal::ProtoMapKey(key));
return (it != features.feature().end()) &&
(it->second.kind_case() == Feature::KindCase::kInt64List);
}
template <>
bool HasFeature<float>(absl::string_view key, const Features& features) {
auto it = features.feature().find(internal::ProtoMapKey(key));
return (it != features.feature().end()) &&
(it->second.kind_case() == Feature::KindCase::kFloatList);
}
template <>
bool HasFeature<std::string>(absl::string_view key, const Features& features) {
auto it = features.feature().find(internal::ProtoMapKey(key));
return (it != features.feature().end()) &&
(it->second.kind_case() == Feature::KindCase::kBytesList);
}
template <>
bool HasFeature<tstring>(absl::string_view key, const Features& features) {
auto it = features.feature().find(internal::ProtoMapKey(key));
return (it != features.feature().end()) &&
(it->second.kind_case() == Feature::KindCase::kBytesList);
}
bool HasFeatureList(absl::string_view key,
const SequenceExample& sequence_example) {
return sequence_example.feature_lists().feature_list().contains(
internal::ProtoMapKey(key));
}
template <>
const protobuf::RepeatedField<protobuf_int64>& GetFeatureValues<protobuf_int64>(
const Feature& feature) {
return feature.int64_list().value();
}
template <>
protobuf::RepeatedField<protobuf_int64>* GetFeatureValues<protobuf_int64>(
Feature* feature) {
return feature->mutable_int64_list()->mutable_value();
}
template <>
const protobuf::RepeatedField<float>& GetFeatureValues<float>(
const Feature& feature) {
return feature.float_list().value();
}
template <>
protobuf::RepeatedField<float>* GetFeatureValues<float>(Feature* feature) {
return feature->mutable_float_list()->mutable_value();
}
template <>
const protobuf::RepeatedPtrField<std::string>& GetFeatureValues<tstring>(
const Feature& feature) {
return feature.bytes_list().value();
}
template <>
const protobuf::RepeatedPtrField<std::string>& GetFeatureValues<std::string>(
const Feature& feature) {
return feature.bytes_list().value();
}
template <>
protobuf::RepeatedPtrField<std::string>* GetFeatureValues<tstring>(
Feature* feature) {
return feature->mutable_bytes_list()->mutable_value();
}
template <>
protobuf::RepeatedPtrField<std::string>* GetFeatureValues<std::string>(
Feature* feature) {
return feature->mutable_bytes_list()->mutable_value();
}
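// Accessors for the repeated Feature messages stored under a feature_list key
// of a SequenceExample. The const overload expects the key to be present; the
// mutable overload creates the entry on first use.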
const protobuf::RepeatedPtrField<Feature>& GetFeatureList(
absl::string_view key, const SequenceExample& sequence_example) {
return sequence_example.feature_lists()
.feature_list()
.at(internal::ProtoMapKey(key))
.feature();
}
protobuf::RepeatedPtrField<Feature>* GetFeatureList(
absl::string_view feature_list_key, SequenceExample* sequence_example) {
return (*sequence_example->mutable_feature_lists()
->mutable_feature_list())[internal::ProtoMapKey(
feature_list_key)]
.mutable_feature();
}
template <>
void ClearFeatureValues<protobuf_int64>(Feature* feature) {
feature->mutable_int64_list()->Clear();
}
template <>
void ClearFeatureValues<float>(Feature* feature) {
feature->mutable_float_list()->Clear();
}
template <>
void ClearFeatureValues<std::string>(Feature* feature) {
feature->mutable_bytes_list()->Clear();
}
template <>
void ClearFeatureValues<tstring>(Feature* feature) {
feature->mutable_bytes_list()->Clear();
}
template <>
Features* GetFeatures<Features>(Features* proto) {
return proto;
}
template <>
Features* GetFeatures<Example>(Example* proto) {
return proto->mutable_features();
}
template <>
Features* GetFeatures<SequenceExample>(SequenceExample* proto) {
return proto->mutable_context();
}
template <>
const Features& GetFeatures<Features>(const Features& proto) {
return proto;
}
template <>
const Features& GetFeatures<Example>(const Example& proto) {
return proto.features();
}
template <>
const Features& GetFeatures<SequenceExample>(const SequenceExample& proto) {
return proto.context();
}
} | #include "tensorflow/core/example/feature_util.h"
#include <algorithm>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
const float kTolerance = 1e-5;
TEST(GetFeatureValuesInt64Test, ReadsASingleValue) {
Example example;
(*example.mutable_features()->mutable_feature())["tag"]
.mutable_int64_list()
->add_value(42);
auto tag = GetFeatureValues<protobuf_int64>("tag", example);
ASSERT_EQ(1, tag.size());
EXPECT_EQ(42, tag.Get(0));
}
TEST(GetFeatureValuesInt64Test, ReadsASingleValueFromFeature) {
Feature feature;
feature.mutable_int64_list()->add_value(42);
auto values = GetFeatureValues<protobuf_int64>(feature);
ASSERT_EQ(1, values.size());
EXPECT_EQ(42, values.Get(0));
}
TEST(GetFeatureValuesInt64Test, ReadsASingleValueFromSequenceExampleContext) {
SequenceExample example;
(*example.mutable_context()->mutable_feature())["tag"]
.mutable_int64_list()
->add_value(42);
auto tag = GetFeatureValues<protobuf_int64>("tag", example);
ASSERT_EQ(1, tag.size());
EXPECT_EQ(42, tag.Get(0));
}
TEST(GetFeatureValuesInt64Test, WritesASingleValue) {
Example example;
GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
ASSERT_EQ(1,
example.features().feature().at("tag").int64_list().value_size());
EXPECT_EQ(42, example.features().feature().at("tag").int64_list().value(0));
}
TEST(GetFeatureValuesInt64Test, WritesASingleValueToFeature) {
Feature feature;
GetFeatureValues<protobuf_int64>(&feature)->Add(42);
ASSERT_EQ(1, feature.int64_list().value_size());
EXPECT_EQ(42, feature.int64_list().value(0));
}
TEST(GetFeatureValuesInt64Test, WritesASingleValueToSequenceExample) {
SequenceExample example;
GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
ASSERT_EQ(1, example.context().feature().at("tag").int64_list().value_size());
EXPECT_EQ(42, example.context().feature().at("tag").int64_list().value(0));
}
TEST(GetFeatureValuesInt64Test, CheckUntypedFieldExistence) {
Example example;
ASSERT_FALSE(HasFeature("tag", example));
GetFeatureValues<protobuf_int64>("tag", &example)->Add(0);
EXPECT_TRUE(HasFeature("tag", example));
}
TEST(GetFeatureValuesInt64Test, CheckUntypedFieldExistenceForSequenceExample) {
SequenceExample seq_example;
ASSERT_FALSE(HasFeature("tag", seq_example));
GetFeatureValues<protobuf_int64>("tag", &seq_example)->Add(0);
EXPECT_TRUE(HasFeature("tag", seq_example));
}
TEST(GetFeatureValuesInt64Test, CheckTypedFieldExistence) {
Example example;
GetFeatureValues<float>("tag", &example)->Add(3.14);
ASSERT_FALSE(HasFeature<protobuf_int64>("tag", example));
GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
EXPECT_TRUE(HasFeature<protobuf_int64>("tag", example));
auto tag_ro = GetFeatureValues<protobuf_int64>("tag", example);
ASSERT_EQ(1, tag_ro.size());
EXPECT_EQ(42, tag_ro.Get(0));
}
TEST(GetFeatureValuesInt64Test, CheckTypedFieldExistenceForSequenceExample) {
SequenceExample sequence_example;
GetFeatureValues<float>("tag", &sequence_example)->Add(3.14);
ASSERT_FALSE(HasFeature<protobuf_int64>("tag", sequence_example));
GetFeatureValues<protobuf_int64>("tag", &sequence_example)->Add(42);
EXPECT_TRUE(HasFeature<protobuf_int64>("tag", sequence_example));
auto tag_ro = GetFeatureValues<protobuf_int64>("tag", sequence_example);
ASSERT_EQ(1, tag_ro.size());
EXPECT_EQ(42, tag_ro.Get(0));
}
TEST(GetFeatureValuesInt64Test, CopyIterableToAField) {
Example example;
std::vector<int> values{1, 2, 3};
std::copy(values.begin(), values.end(),
protobuf::RepeatedFieldBackInserter(
GetFeatureValues<protobuf_int64>("tag", &example)));
auto tag_ro = GetFeatureValues<protobuf_int64>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_EQ(1, tag_ro.Get(0));
EXPECT_EQ(2, tag_ro.Get(1));
EXPECT_EQ(3, tag_ro.Get(2));
}
TEST(GetFeatureValuesFloatTest, ReadsASingleValueFromFeature) {
Feature feature;
feature.mutable_float_list()->add_value(3.14);
auto values = GetFeatureValues<float>(feature);
ASSERT_EQ(1, values.size());
EXPECT_NEAR(3.14, values.Get(0), kTolerance);
}
TEST(GetFeatureValuesFloatTest, ReadsASingleValue) {
Example example;
(*example.mutable_features()->mutable_feature())["tag"]
.mutable_float_list()
->add_value(3.14);
auto tag = GetFeatureValues<float>("tag", example);
ASSERT_EQ(1, tag.size());
EXPECT_NEAR(3.14, tag.Get(0), kTolerance);
}
TEST(GetFeatureValuesFloatTest, ReadsASingleValueFromSequenceExample) {
SequenceExample example;
(*example.mutable_context()->mutable_feature())["tag"]
.mutable_float_list()
->add_value(3.14);
auto tag = GetFeatureValues<float>("tag", example);
ASSERT_EQ(1, tag.size());
EXPECT_NEAR(3.14, tag.Get(0), kTolerance);
}
TEST(GetFeatureValuesFloatTest, WritesASingleValueToFeature) {
Feature feature;
GetFeatureValues<float>(&feature)->Add(3.14);
ASSERT_EQ(1, feature.float_list().value_size());
EXPECT_NEAR(3.14, feature.float_list().value(0), kTolerance);
}
TEST(GetFeatureValuesFloatTest, WritesASingleValue) {
Example example;
GetFeatureValues<float>("tag", &example)->Add(3.14);
ASSERT_EQ(1,
example.features().feature().at("tag").float_list().value_size());
EXPECT_NEAR(3.14,
example.features().feature().at("tag").float_list().value(0),
kTolerance);
}
TEST(GetFeatureValuesFloatTest, WritesASingleValueToSequenceExample) {
SequenceExample example;
GetFeatureValues<float>("tag", &example)->Add(3.14);
ASSERT_EQ(1, example.context().feature().at("tag").float_list().value_size());
EXPECT_NEAR(3.14, example.context().feature().at("tag").float_list().value(0),
kTolerance);
}
TEST(GetFeatureValuesFloatTest, CheckTypedFieldExistence) {
Example example;
GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
ASSERT_FALSE(HasFeature<float>("tag", example));
GetFeatureValues<float>("tag", &example)->Add(3.14);
EXPECT_TRUE(HasFeature<float>("tag", example));
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(1, tag_ro.size());
EXPECT_NEAR(3.14, tag_ro.Get(0), kTolerance);
}
TEST(GetFeatureValuesFloatTest, CheckTypedFieldExistenceForDeprecatedMethod) {
Example example;
GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
ASSERT_FALSE(ExampleHasFeature<float>("tag", example));
GetFeatureValues<float>("tag", &example)->Add(3.14);
EXPECT_TRUE(ExampleHasFeature<float>("tag", example));
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(1, tag_ro.size());
EXPECT_NEAR(3.14, tag_ro.Get(0), kTolerance);
}
TEST(GetFeatureValuesStringTest, ReadsASingleValueFromFeature) {
Feature feature;
feature.mutable_bytes_list()->add_value("FOO");
auto values = GetFeatureValues<std::string>(feature);
ASSERT_EQ(1, values.size());
EXPECT_EQ("FOO", values.Get(0));
}
TEST(GetFeatureValuesStringTest, ReadsASingleValue) {
Example example;
(*example.mutable_features()->mutable_feature())["tag"]
.mutable_bytes_list()
->add_value("FOO");
auto tag = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(1, tag.size());
EXPECT_EQ("FOO", tag.Get(0));
}
TEST(GetFeatureValuesStringTest, ReadsASingleValueFromSequenceExample) {
SequenceExample example;
(*example.mutable_context()->mutable_feature())["tag"]
.mutable_bytes_list()
->add_value("FOO");
auto tag = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(1, tag.size());
EXPECT_EQ("FOO", tag.Get(0));
}
TEST(GetFeatureValuesStringTest, WritesASingleValueToFeature) {
Feature feature;
*GetFeatureValues<std::string>(&feature)->Add() = "FOO";
ASSERT_EQ(1, feature.bytes_list().value_size());
EXPECT_EQ("FOO", feature.bytes_list().value(0));
}
TEST(GetFeatureValuesStringTest, WritesASingleValue) {
Example example;
*GetFeatureValues<std::string>("tag", &example)->Add() = "FOO";
ASSERT_EQ(1,
example.features().feature().at("tag").bytes_list().value_size());
EXPECT_EQ("FOO",
example.features().feature().at("tag").bytes_list().value(0));
}
TEST(GetFeatureValuesStringTest, WritesASingleValueSequenceExample) {
SequenceExample example;
*GetFeatureValues<std::string>("tag", &example)->Add() = "FOO";
ASSERT_EQ(1, example.context().feature().at("tag").bytes_list().value_size());
EXPECT_EQ("FOO", example.context().feature().at("tag").bytes_list().value(0));
}
TEST(GetFeatureValuesStringTest, CheckTypedFieldExistence) {
Example example;
GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
ASSERT_FALSE(HasFeature<std::string>("tag", example));
*GetFeatureValues<std::string>("tag", &example)->Add() = "FOO";
EXPECT_TRUE(HasFeature<std::string>("tag", example));
auto tag_ro = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(1, tag_ro.size());
EXPECT_EQ("FOO", tag_ro.Get(0));
}
TEST(AppendFeatureValuesTest, FloatValuesFromContainer) {
Example example;
std::vector<double> values{1.1, 2.2, 3.3};
AppendFeatureValues(values, "tag", &example);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(AppendFeatureValuesTest, FloatValuesFromContainerWithStringViewKey) {
Example example;
std::vector<double> values{1.1, 2.2, 3.3};
absl::string_view key("tag");
AppendFeatureValues(values, key, &example);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(AppendFeatureValuesTest, FloatValuesUsingInitializerList) {
Example example;
AppendFeatureValues({1.1, 2.2, 3.3}, "tag", &example);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(AppendFeatureValuesTest,
FloatValuesUsingInitializerListWithStringViewKey) {
Example example;
absl::string_view key("tag");
AppendFeatureValues({1.1, 2.2, 3.3}, key, &example);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(AppendFeatureValuesTest, FloatValuesUsingIterators) {
Example example;
std::vector<double> values{1.1, 2.2, 3.3};
AppendFeatureValues(values.begin(), values.end(), "tag", &example);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(AppendFeatureValuesTest, FloatValuesUsingIteratorsWithStringViewKey) {
Example example;
absl::string_view key("tag");
std::vector<double> values{1.1, 2.2, 3.3};
AppendFeatureValues(values.begin(), values.end(), key, &example);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(SetFeatureValuesTest, FloatValuesUsingInitializerList) {
Example example;
AppendFeatureValues({1.1, 2.2, 3.3}, "tag", &example);
SetFeatureValues({10.1, 20.2, 30.3}, "tag", &example);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(10.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(20.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(30.3, tag_ro.Get(2), kTolerance);
}
TEST(SetFeatureValuesTest, ContainerOfStringView) {
Example example;
std::vector<std::string> values = {"hello", "world"};
std::vector<absl::string_view> values_string_view(values.begin(),
values.end());
SetFeatureValues(values_string_view, "tag", &example);
auto tag_ro = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(tag_ro.size(), 2);
EXPECT_EQ(tag_ro.Get(0), "hello");
EXPECT_EQ(tag_ro.Get(1), "world");
}
TEST(AppendFeatureValuesTest, Int64ValuesUsingInitializerList) {
Example example;
std::vector<protobuf_int64> values{1, 2, 3};
AppendFeatureValues(values, "tag", &example);
auto tag_ro = GetFeatureValues<protobuf_int64>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_EQ(1, tag_ro.Get(0));
EXPECT_EQ(2, tag_ro.Get(1));
EXPECT_EQ(3, tag_ro.Get(2));
}
TEST(AppendFeatureValuesTest, StringValuesUsingInitializerList) {
Example example;
AppendFeatureValues({"FOO", "BAR", "BAZ"}, "tag", &example);
auto tag_ro = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_EQ("FOO", tag_ro.Get(0));
EXPECT_EQ("BAR", tag_ro.Get(1));
EXPECT_EQ("BAZ", tag_ro.Get(2));
}
TEST(AppendFeatureValuesTest, StringVariablesUsingInitializerList) {
Example example;
string string1("FOO");
string string2("BAR");
string string3("BAZ");
AppendFeatureValues({string1, string2, string3}, "tag", &example);
auto tag_ro = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_EQ("FOO", tag_ro.Get(0));
EXPECT_EQ("BAR", tag_ro.Get(1));
EXPECT_EQ("BAZ", tag_ro.Get(2));
}
TEST(AppendFeatureValuesTest, StringViewVariablesUsingInitializerList) {
Example example;
AppendFeatureValues({absl::string_view("FOO"), absl::string_view("BAR"),
absl::string_view("BAZ")},
"tag", &example);
auto tag_ro = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_EQ("FOO", tag_ro.Get(0));
EXPECT_EQ("BAR", tag_ro.Get(1));
EXPECT_EQ("BAZ", tag_ro.Get(2));
}
TEST(AppendFeatureValuesTest, StringViewVariablesUsingIterators) {
Example example;
std::vector<absl::string_view> strings;
strings.push_back("FOO");
strings.push_back("BAR");
strings.push_back("BAZ");
AppendFeatureValues(strings.begin(), strings.end(), "tag", &example);
auto tag_ro = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_EQ("FOO", tag_ro.Get(0));
EXPECT_EQ("BAR", tag_ro.Get(1));
EXPECT_EQ("BAZ", tag_ro.Get(2));
}
TEST(GetFeatureTest, WritesAVectorToFeature) {
Example example;
Feature* feature = GetFeature("tag", &example);
AppendFeatureValues<float>({1.1, 2.2, 3.3}, feature);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(GetFeatureTest, ReadsAVectorFromFeature) {
Example example;
AppendFeatureValues<float>({1.1, 2.2, 3.3}, "tag", &example);
const Feature& feature = GetFeature("tag", example);
auto tag_ro = GetFeatureValues<float>(feature);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(SequenceExampleTest, ReadsASingleValueFromContext) {
SequenceExample se;
(*se.mutable_context()->mutable_feature())["tag"]
.mutable_int64_list()
->add_value(42);
auto values = GetFeatureValues<protobuf_int64>("tag", se.context());
ASSERT_EQ(1, values.size());
EXPECT_EQ(42, values.Get(0));
}
TEST(SequenceExampleTest, WritesASingleValueToContext) {
SequenceExample se;
GetFeatureValues<protobuf_int64>("tag", se.mutable_context())->Add(42);
ASSERT_EQ(1, se.context().feature().at("tag").int64_list().value_size());
EXPECT_EQ(42, se.context().feature().at("tag").int64_list().value(0));
}
TEST(SequenceExampleTest, AppendFeatureValuesToContextSingleArg) {
SequenceExample se;
AppendFeatureValues({1.1, 2.2, 3.3}, "tag", se.mutable_context());
auto tag_ro = GetFeatureValues<float>("tag", se.context());
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(SequenceExampleTest, CheckTypedFieldExistence) {
SequenceExample se;
GetFeatureValues<float>("tag", se.mutable_context())->Add(3.14);
ASSERT_FALSE(HasFeature<protobuf_int64>("tag", se.context()));
GetFeatureValues<protobuf_int64>("tag", se.mutable_context())->Add(42);
EXPECT_TRUE(HasFeature<protobuf_int64>("tag", se.context()));
auto tag_ro = GetFeatureValues<protobuf_int64>("tag", se.context());
ASSERT_EQ(1, tag_ro.size());
EXPECT_EQ(42, tag_ro.Get(0));
}
TEST(SequenceExampleTest, ReturnsExistingFeatureLists) {
SequenceExample se;
(*se.mutable_feature_lists()->mutable_feature_list())["tag"]
.mutable_feature()
->Add();
auto feature = GetFeatureList("tag", se);
ASSERT_EQ(1, feature.size());
}
TEST(SequenceExampleTest, CreatesNewFeatureLists) {
SequenceExample se;
GetFeatureList("tag", &se)->Add();
EXPECT_EQ(1, se.feature_lists().feature_list().at("tag").feature_size());
}
TEST(SequenceExampleTest, CheckFeatureListExistence) {
SequenceExample se;
ASSERT_FALSE(HasFeatureList("tag", se));
GetFeatureList("tag", &se)->Add();
ASSERT_TRUE(HasFeatureList("tag", se));
}
TEST(SequenceExampleTest, AppendFeatureValuesWithInitializerList) {
SequenceExample se;
AppendFeatureValues({1, 2, 3}, "ids", se.mutable_context());
AppendFeatureValues({"cam1-0", "cam2-0"},
GetFeatureList("images", &se)->Add());
AppendFeatureValues({"cam1-1", "cam2-2"},
GetFeatureList("images", &se)->Add());
SequenceExample expected_proto;
protobuf::TextFormat::ParseFromString(
"context {\n"
" feature {\n"
" key: \"ids\"\n"
" value {\n"
" int64_list {\n"
" value: 1\n"
" value: 2\n"
" value: 3\n"
" }\n"
" }\n"
" }\n"
"}\n"
"feature_lists {\n"
" feature_list {\n"
" key: \"images\"\n"
" value {\n"
" feature {\n"
" bytes_list {\n"
" value: \"cam1-0\"\n"
" value: \"cam2-0\"\n"
" }\n"
" }\n"
" feature {\n"
" bytes_list {\n"
" value: \"cam1-1\"\n"
" value: \"cam2-2\"\n"
" }\n"
" }\n"
" }\n"
" }\n"
"}\n",
&expected_proto);
EXPECT_EQ(se.DebugString(), expected_proto.DebugString());
}
TEST(SequenceExampleTest, AppendFeatureValuesWithVectors) {
SequenceExample se;
std::vector<float> readings{1.0, 2.5, 5.0};
AppendFeatureValues(readings, GetFeatureList("movie_ratings", &se)->Add());
SequenceExample expected_proto;
protobuf::TextFormat::ParseFromString(
"feature_lists {\n"
" feature_list {\n"
" key: \"movie_ratings\"\n"
" value {\n"
" feature {\n"
" float_list {\n"
" value: 1\n"
" value: 2.5\n"
" value: 5\n"
" }\n"
" }\n"
" }\n"
" }\n"
"}\n",
&expected_proto);
EXPECT_EQ(se.DebugString(), expected_proto.DebugString());
}
TEST(SequenceExampleTest, SetContextFeatureValuesWithInitializerList) {
SequenceExample se;
SetFeatureValues({101, 102, 103}, "ids", se.mutable_context());
SetFeatureValues({1, 2, 3}, "ids", se.mutable_context());
AppendFeatureValues({4, 5, 6}, "ids", se.mutable_context());
SequenceExample expected_proto;
protobuf::TextFormat::ParseFromString(
"context {\n"
" feature {\n"
" key: \"ids\"\n"
" value {\n"
" int64_list {\n"
" value: 1\n"
" value: 2\n"
" value: 3\n"
" value: 4\n"
" value: 5\n"
" value: 6\n"
" }\n"
" }\n"
" }\n"
"}\n",
&expected_proto);
EXPECT_EQ(se.DebugString(), expected_proto.DebugString());
}
TEST(SequenceExampleTest, SetFeatureValuesWithInitializerList) {
SequenceExample se;
AppendFeatureValues({1, 2, 3}, "ids", se.mutable_context());
SetFeatureValues({4, 5, 6}, "ids", se.mutable_context());
AppendFeatureValues({"cam1-0", "cam2-0"},
GetFeatureList("images", &se)->Add());
SetFeatureValues({"cam1-1", "cam2-1"}, GetFeatureList("images", &se)->Add());
AppendFeatureValues({"cam1-0", "cam2-0"},
GetFeatureList("more-images", &se)->Add());
SetFeatureValues({"cam1-1", "cam2-1"},
GetFeatureList("more-images", &se)->Mutable(0));
SequenceExample expected_proto;
protobuf::TextFormat::ParseFromString(
"context {\n"
" feature {\n"
" key: \"ids\"\n"
" value {\n"
" int64_list {\n"
" value: 4\n"
" value: 5\n"
" value: 6\n"
" }\n"
" }\n"
" }\n"
"}\n"
"feature_lists {\n"
" feature_list {\n"
" key: \"images\"\n"
" value {\n"
" feature {\n"
" bytes_list {\n"
" value: \"cam1-0\"\n"
" value: \"cam2-0\"\n"
" }\n"
" }\n"
" feature {\n"
" bytes_list {\n"
" value: \"cam1-1\"\n"
" value: \"cam2-1\"\n"
" }\n"
" }\n"
" }\n"
" }\n"
" feature_list {\n"
" key: \"more-images\"\n"
" value {\n"
" feature {\n"
" bytes_list {\n"
" value: \"cam1-1\"\n"
" value: \"cam2-1\"\n"
" }\n"
" }\n"
" }\n"
" }\n"
"}\n",
&expected_proto);
EXPECT_EQ(se.DebugString(), expected_proto.DebugString());
}
TEST(MaybeGetFeatureValuesTest, ReturnsNullPtr) {
const Example example;
auto tag = MaybeGetFeatureValues<protobuf_int64>("tag", example);
ASSERT_EQ(tag, nullptr);
}
TEST(MaybeGetFeatureValuesTest, ReadsASingleInt) {
Example example;
(*example.mutable_features()->mutable_feature())["tag"]
.mutable_int64_list()
->add_value(42);
auto tag = MaybeGetFeatureValues<protobuf_int64>("tag", example);
ASSERT_EQ(1, tag->size());
EXPECT_EQ(42, tag->Get(0));
}
TEST(MaybeGetFeatureValuesTest, ReadsASingleFloat) {
Example example;
(*example.mutable_features()->mutable_feature())["tag"]
.mutable_float_list()
->add_value(0.3);
auto tag = MaybeGetFeatureValues<float>("tag", example);
ASSERT_EQ(1, tag->size());
EXPECT_FLOAT_EQ(0.3, tag->Get(0));
}
TEST(MaybeGetFeatureValuesTest, ReadsASingleString) {
Example example;
(*example.mutable_features()->mutable_feature())["tag"]
.mutable_bytes_list()
->add_value("entry");
auto tag = MaybeGetFeatureValues<std::string>("tag", example);
ASSERT_EQ(1, tag->size());
EXPECT_EQ("entry", tag->Get(0));
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/example/feature_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/example/feature_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
732385c8-9ae6-42da-980c-ef56b8243f9a | cpp | tensorflow/tensorflow | xplane_to_op_stats | tensorflow/core/profiler/convert/xplane_to_op_stats.cc | tensorflow/core/profiler/convert/xplane_to_op_stats_test.cc | #include "tensorflow/core/profiler/convert/xplane_to_op_stats.h"
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/math_utils.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/tpu_xplane_utils.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_utils.h"
#include "tensorflow/core/profiler/convert/op_metrics_db_combiner.h"
#include "tensorflow/core/profiler/convert/step_events_to_steps_db.h"
#include "tensorflow/core/profiler/convert/xplane_to_kernel_stats_db.h"
#include "tensorflow/core/profiler/convert/xplane_to_op_metrics_db.h"
#include "tensorflow/core/profiler/convert/xplane_to_step_events.h"
#include "tensorflow/core/profiler/protobuf/diagnostics.pb.h"
#include "tensorflow/core/profiler/protobuf/hardware_types.pb.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/protobuf/tf_function.pb.h"
#include "tensorflow/core/profiler/utils/device_caps_utils.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/hardware_type_utils.h"
#include "tensorflow/core/profiler/utils/hlo_proto_map.h"
#include "tensorflow/core/profiler/utils/kernel_stats_utils.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
using tsl::profiler::FindPlanesWithPrefix;
using tsl::profiler::FindTensorCorePlanes;
std::string Hostname(const XSpace& space) {
if (space.hostnames().empty()) return "localhost";
DCHECK_EQ(space.hostnames_size(), 1);
const std::string& hostname = space.hostnames(0);
return hostname;
}
}  // namespace
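// Builds a PerfEnv from the peak compute rate (TFLOP/s) and per-memory-type
// peak bandwidths (GB/s); the ridge point is the peak rate in GFLOP/s divided
// by the HBM read/write bandwidth.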
PerfEnv MakePerfEnv(double peak_tera_flops_per_second,
std::vector<double> peak_bws) {
PerfEnv result;
result.set_peak_tera_flops_per_second(peak_tera_flops_per_second);
for (const auto bw : peak_bws) {
result.add_peak_bws_giga_bytes_per_second(bw);
}
result.set_ridge_point(tsl::profiler::TeraToGiga(peak_tera_flops_per_second) /
peak_bws[MemBwType::MEM_BW_TYPE_HBM_RW]);
return result;
}
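// Derives the performance environment from a device plane. For non-TPU (GPU)
// planes the peaks are computed from the device capabilities; TPU planes carry
// precomputed peak-rate stats that are read back directly.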
PerfEnv GetPerfEnvFromXPlane(const XPlane& device_plane) {
DeviceCapabilities cap = GetDeviceCaps(device_plane);
if (!absl::StartsWith(device_plane.name(), kTpuPlanePrefix)) {
double peak_tera_flops_per_second =
cap.num_cores() *
tsl::profiler::GigaToTera(GetFlopMaxThroughputPerSM(cap));
double hbm_bw_giga_bytes_per_second =
tsl::profiler::UniToGiga(cap.memory_bandwidth());
double shm_giga_bytes_per_second =
cap.num_cores() *
tsl::profiler::UniToGiga(GetSharedMemoryBandwidthPerSM(cap));
return MakePerfEnv(peak_tera_flops_per_second,
{hbm_bw_giga_bytes_per_second,
shm_giga_bytes_per_second,
shm_giga_bytes_per_second});
} else {
XPlaneVisitor visitor = tsl::profiler::CreateTfXPlaneVisitor(&device_plane);
auto peak_tera_flops_per_second =
visitor.GetStat(StatType::kDevCapPeakTeraflopsPerSecond);
auto peak_tera_flops_per_second_val =
peak_tera_flops_per_second.has_value()
? peak_tera_flops_per_second->DoubleValue()
: 0.0;
auto peak_hbm_bw_giga_bytes_per_second =
visitor.GetStat(StatType::kDevCapPeakHbmBwGigabytesPerSecond);
auto peak_hbm_bw_giga_bytes_per_second_val =
peak_hbm_bw_giga_bytes_per_second.has_value()
? peak_hbm_bw_giga_bytes_per_second->DoubleValue()
: 0.0;
auto peak_sram_rd_bw_giga_bytes_per_second =
visitor.GetStat(StatType::kDevCapPeakSramRdBwGigabytesPerSecond);
auto peak_sram_rd_bw_giga_bytes_per_second_val =
peak_sram_rd_bw_giga_bytes_per_second.has_value()
? peak_sram_rd_bw_giga_bytes_per_second->DoubleValue()
: 0.0;
auto peak_sram_wr_bw_giga_bytes_per_second =
visitor.GetStat(StatType::kDevCapPeakSramWrBwGigabytesPerSecond);
auto peak_sram_wr_bw_giga_bytes_per_second_val =
peak_sram_wr_bw_giga_bytes_per_second.has_value()
? peak_sram_wr_bw_giga_bytes_per_second->DoubleValue()
: 0.0;
return MakePerfEnv(peak_tera_flops_per_second_val,
{peak_hbm_bw_giga_bytes_per_second_val,
peak_sram_rd_bw_giga_bytes_per_second_val,
peak_sram_wr_bw_giga_bytes_per_second_val});
}
}
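// Fills the run environment: host/task counts, hostname, and the device type
// and core count, preferring GPU planes, then TensorCore (TPU) planes, and
// falling back to CPU when neither is present.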
void SetRunEnvironment(const XSpace& space, RunEnvironment* env) {
env->set_host_count(1);
env->set_task_count(1);
env->mutable_hostnames()->insert({Hostname(space), true});
std::vector<const XPlane*> gpu_planes =
FindPlanesWithPrefix(space, kGpuPlanePrefix);
if (!gpu_planes.empty()) {
absl::string_view gpu_model =
GpuModelName(GetDeviceCaps(*gpu_planes.front()));
if (!gpu_model.empty()) {
env->set_device_type(std::string(gpu_model));
} else {
env->set_device_type("GPU");
}
env->set_device_core_count(gpu_planes.size());
} else if (std::vector<const XPlane*> tpu_planes =
FindTensorCorePlanes(space);
!tpu_planes.empty()) {
XPlaneVisitor visitor =
tsl::profiler::CreateTfXPlaneVisitor(tpu_planes.at(0));
auto xstat = visitor.GetStat(StatType::kDeviceTypeString);
if (xstat.has_value()) {
env->set_device_type(std::string(xstat->StrOrRefValue()));
}
env->set_device_core_count(tpu_planes.size());
} else {
env->set_device_type("CPU");
env->set_device_core_count(0);
}
}
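// Copies error and warning messages from the XSpace into the OpStats
// diagnostics, de-duplicating repeated messages.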
void PropagateXSpaceDiagnosticsToOpStats(const XSpace& space,
OpStats* op_stats) {
if (!space.errors().empty()) {
absl::flat_hash_set<std::string> unique_errors;
unique_errors.insert(space.errors().begin(), space.errors().end());
*op_stats->mutable_diagnostics()->mutable_errors() = {unique_errors.begin(),
unique_errors.end()};
}
if (!space.warnings().empty()) {
absl::flat_hash_set<std::string> unique_warnings;
unique_warnings.insert(space.warnings().begin(), space.warnings().end());
*op_stats->mutable_diagnostics()->mutable_warnings() = {
unique_warnings.begin(), unique_warnings.end()};
}
}
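// Records the HLO module name for every program id found in the profile so
// later consumers can map program ids back to module names.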
void SetProgramIdToNameMap(const HloProtoMap& hlo_proto_map,
tensorflow::profiler::OpStats& op_stats) {
auto& program_id_to_name_map = *op_stats.mutable_program_id_to_name_map();
for (const auto& [program_id, hlo_proto] : hlo_proto_map) {
program_id_to_name_map[program_id] = hlo_proto->hlo_module().name();
}
}
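// Main conversion entry point: walks the device and host planes of a single
// XSpace and assembles an OpStats proto according to the requested options.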
OpStats ConvertXSpaceToOpStats(const XSpace& space,
const OpStatsOptions& options) {
OpStats op_stats;
StepEvents step_events;
PropagateXSpaceDiagnosticsToOpStats(space, &op_stats);
OpMetricsDbCombiner op_metrics_db_combiner(
op_stats.mutable_device_op_metrics_db());
SetRunEnvironment(space, op_stats.mutable_run_environment());
KernelReportMap reports;
std::vector<const XPlane*> device_planes =
FindPlanesWithPrefix(space, kTpuPlanePrefix);
const bool is_gpu = device_planes.empty();
if (is_gpu) {
device_planes = FindPlanesWithPrefix(space, kGpuPlanePrefix);
}
const bool is_tpu = !is_gpu;
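  // Per-device pass: op metrics, step events, and kernel reports. TPU traces
  // are aggregated before conversion and their step events are intersected
  // across cores, while GPU step events are unioned.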
for (const XPlane* device_trace : device_planes) {
XPlane aggregated_xplane;
bool use_aggregated_xplane = false;
if (options.generate_op_metrics_db) {
if (!op_stats.has_perf_env()) {
*op_stats.mutable_perf_env() = GetPerfEnvFromXPlane(*device_trace);
}
if (!is_tpu) {
OpMetricsDb device_op_metrics_db =
ConvertDeviceTraceXPlaneToOpMetricsDb(*device_trace);
op_metrics_db_combiner.Combine(device_op_metrics_db);
} else {
AggregateXPlane(*device_trace, aggregated_xplane);
use_aggregated_xplane = true;
OpMetricsDb device_op_metrics_db =
ConvertTpuDeviceTraceXPlaneToOpMetricsDb(aggregated_xplane);
op_metrics_db_combiner.Combine(device_op_metrics_db);
}
}
if (options.generate_step_db) {
StepEvents device_step_events = ConvertDeviceTraceXPlaneToStepEvents(
use_aggregated_xplane ? aggregated_xplane : *device_trace);
if (is_tpu) {
IntersectCombineStepEvents(device_step_events, &step_events);
} else {
UnionCombineStepEvents(device_step_events, &step_events);
}
}
if (options.generate_kernel_stats_db) {
ConvertDeviceTraceXPlaneToKernelReports(*device_trace,
{}, &reports);
}
}
if (options.generate_kernel_stats_db) {
CopyTopKDurationKernelReportsToDb(reports,
op_stats.mutable_kernel_stats_db());
}
bool has_device = !device_planes.empty();
const XPlane* host_plane = FindPlaneWithName(space, kHostThreadsPlaneName);
if (host_plane) {
if (options.generate_op_metrics_db) {
*op_stats.mutable_host_op_metrics_db() =
ConvertHostThreadsXPlaneToOpMetricsDb(*host_plane);
}
if (options.generate_step_db && !has_device) {
StepEvents host_step_events =
ConvertHostThreadsXPlaneToStepEvents(*host_plane, nullptr);
UnionCombineStepEvents(host_step_events, &step_events);
}
XPlaneVisitor visitor = tsl::profiler::CreateTfXPlaneVisitor(host_plane);
auto stat = visitor.GetStat(StatType::kMatrixUnitUtilizationPercent);
if (stat.has_value()) {
op_stats.mutable_performance_counter_result()
->set_matrix_unit_utilization_percent(stat->DoubleValue());
}
}
if (options.generate_step_db) {
if (is_tpu) {
*op_stats.mutable_step_db() = ConvertStepEventsToStepDb(
          has_device, /*maybe_drop_incomplete_steps=*/false, step_events);
*op_stats.mutable_device_op_metrics_db()->mutable_precision_stats() =
ComputePrecisionStats(step_events);
} else {
StepEvents nonoverlapped_step_events =
ToNonOverlappedStepEvents(step_events);
*op_stats.mutable_step_db() = ConvertStepEventsToStepDb(
has_device, options.maybe_drop_incomplete_steps,
nonoverlapped_step_events);
*op_stats.mutable_device_op_metrics_db()->mutable_precision_stats() =
ComputePrecisionStats(nonoverlapped_step_events);
}
}
if (!is_tpu) {
CoreDetails& details =
(*op_stats.mutable_core_id_to_details())[kDefaultGpuLocalCoreId];
details.set_hostname(Hostname(space));
}
HloProtoMap hlo_proto_map;
hlo_proto_map.AddHloProtosFromXSpace(space);
SetProgramIdToNameMap(hlo_proto_map, op_stats);
return op_stats;
}
}  // namespace profiler
} | #include "tensorflow/core/profiler/convert/xplane_to_op_stats.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "xla/tsl/profiler/utils/group_events.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/convert/multi_xplanes_to_op_stats.h"
#include "tensorflow/core/profiler/convert/repository.h"
#include "tensorflow/core/profiler/convert/step_events_to_steps_db.h"
#include "tensorflow/core/profiler/protobuf/diagnostics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/protobuf/tf_function.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
#include "tsl/platform/status.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::testing::Property;
using ::testing::UnorderedElementsAre;
TEST(ConvertXPlaneToOpStats, GpuPerfEnv) {
auto space = std::make_unique<XSpace>();
constexpr double kMaxError = 0.01;
constexpr int kClockRateKHz = 1530000;
constexpr int kCoreCount = 80;
constexpr uint64 kMemoryBandwidthBytesPerSecond =
uint64{900} * 1000 * 1000 * 1000;
constexpr int kComputeCapMajor = 7;
constexpr int kComputeCapMinor = 0;
XPlaneBuilder device_plane(
GetOrCreateGpuXPlane(space.get(), 0));
device_plane.AddStatValue(*device_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kDevVendor)),
kDeviceVendorNvidia);
device_plane.AddStatValue(*device_plane.GetOrCreateStatMetadata("clock_rate"),
kClockRateKHz);
device_plane.AddStatValue(*device_plane.GetOrCreateStatMetadata("core_count"),
kCoreCount);
device_plane.AddStatValue(
*device_plane.GetOrCreateStatMetadata("memory_bandwidth"),
kMemoryBandwidthBytesPerSecond);
device_plane.AddStatValue(
*device_plane.GetOrCreateStatMetadata("compute_cap_major"),
kComputeCapMajor);
device_plane.AddStatValue(
*device_plane.GetOrCreateStatMetadata("compute_cap_minor"),
kComputeCapMinor);
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(space));
auto session_snapshot_or =
SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStatsOptions options;
options.generate_op_metrics_db = true;
OpStats op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
options, &op_stats));
const PerfEnv& perf_env = op_stats.perf_env();
EXPECT_NEAR(125.34, perf_env.peak_tera_flops_per_second(), kMaxError);
EXPECT_NEAR(
900,
perf_env.peak_bws_giga_bytes_per_second(MemBwType::MEM_BW_TYPE_HBM_RW),
kMaxError);
EXPECT_NEAR(139.26, perf_env.ridge_point(), kMaxError);
}
TEST(ConvertXPlaneToOpStats, GpuRunEnvironment) {
auto space = std::make_unique<XSpace>();
XPlaneBuilder device_plane1(
GetOrCreateGpuXPlane(space.get(), 0));
device_plane1.AddStatValue(*device_plane1.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kDevVendor)),
kDeviceVendorNvidia);
XPlaneBuilder device_plane2(
GetOrCreateGpuXPlane(space.get(), 1));
device_plane2.AddStatValue(*device_plane2.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kDevVendor)),
kDeviceVendorNvidia);
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(space));
auto session_snapshot_or =
SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStats op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(
session_snapshot_or.value(), OpStatsOptions(), &op_stats));
const RunEnvironment& run_env = op_stats.run_environment();
EXPECT_EQ("Nvidia GPU", run_env.device_type());
EXPECT_EQ(1, run_env.host_count());
EXPECT_EQ(1, run_env.task_count());
EXPECT_EQ(2, run_env.device_core_count());
}
TEST(ConvertXPlaneToOpStats, CpuOnlyStepDbTest) {
constexpr int64_t kStepNum = 123;
constexpr int64_t kStepId = 0;
auto space = std::make_unique<XSpace>();
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(space.get()));
host_plane_builder.ReserveLines(2);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
0, 100, {{StatType::kStepNum, kStepNum}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
10, 90,
{{StatType::kStepId, kStepId},
{StatType::kProducerType, int64_t{1}},
{StatType::kProducerId, kStepId}});
auto tf_executor_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kStepId, kStepId},
{StatType::kConsumerType, int64_t{1}},
{StatType::kConsumerId, kStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 30, 70);
OpStatsOptions options;
options.generate_op_metrics_db = true;
options.generate_step_db = true;
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(space));
auto session_snapshot_or =
SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStats op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
options, &op_stats));
const StepDatabaseResult& step_db = op_stats.step_db();
EXPECT_EQ(step_db.step_sequence_size(), 1);
}
TEST(ConvertXPlaneToOpStats, GpuStepDbTest) {
constexpr int64_t kStepNum = 123;
constexpr int64_t kStepId = 0;
constexpr int64_t kCorrelationId = 100;
auto space = std::make_unique<XSpace>();
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(space.get()));
host_plane_builder.ReserveLines(2);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
0, 100, {{StatType::kStepNum, kStepNum}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
10, 90,
{{StatType::kStepId, kStepId},
{StatType::kProducerType, int64_t{1}},
{StatType::kProducerId, kStepId}});
auto tf_executor_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 20, 20,
{{StatType::kStepId, kStepId},
{StatType::kConsumerType, int64_t{1}},
{StatType::kConsumerId, kStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 30, 10,
{{StatType::kCorrelationId, kCorrelationId}});
XPlaneBuilder device_plane_builder(
GetOrCreateGpuXPlane(space.get(), 0));
device_plane_builder.ReserveLines(1);
auto stream = device_plane_builder.GetOrCreateLine(0);
CreateXEvent(&device_plane_builder, &stream, "matmul", 50, 40,
{{StatType::kCorrelationId, kCorrelationId}});
OpStatsOptions options;
options.generate_op_metrics_db = true;
options.generate_step_db = true;
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(space));
auto session_snapshot_or =
SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStats op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
options, &op_stats));
const StepDatabaseResult& step_db = op_stats.step_db();
EXPECT_EQ(step_db.step_sequence_size(), 0);
PrecisionStats precision_stats =
op_stats.device_op_metrics_db().precision_stats();
EXPECT_EQ(precision_stats.compute_16bit_ps(), 0);
EXPECT_EQ(precision_stats.compute_32bit_ps(), 40);
}
TEST(ConvertXPlaneToOpStats, PropagateAndDedupErrors) {
XSpace space;
static constexpr char kError[] = "host: error";
*space.add_errors() = kError;
*space.add_errors() = kError;
OpStats op_stats = ConvertXSpaceToOpStats(space, OpStatsOptions());
EXPECT_EQ(1, op_stats.diagnostics().errors_size());
EXPECT_EQ(kError, op_stats.diagnostics().errors(0));
}
TEST(ConvertXPlaneToOpStats, Hostnames) {
XSpace space;
static constexpr char kHost[] = "host1";
*space.add_hostnames() = kHost;
OpStats op_stats = ConvertXSpaceToOpStats(space, OpStatsOptions());
EXPECT_EQ(
kHost,
op_stats.core_id_to_details().at(kDefaultGpuLocalCoreId).hostname());
}
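// Builds a minimal host-only XSpace with one traced step running a single
// "aaa:bbb" op and tags it with the given hostname; used by the multi-host
// combining test below.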
void BuildXSpaceForTest(XSpace& xspace, absl::string_view hostname) {
constexpr int64_t kStepNum = 123;
constexpr int64_t kStepId = 456;
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(&xspace));
host_plane_builder.ReserveLines(2);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
0, 100, {{StatType::kStepNum, kStepNum}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
10, 90,
{{StatType::kStepId, kStepId},
{StatType::kProducerType, int64_t{1}},
{StatType::kProducerId, kStepId}});
auto executor_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &executor_thread,
HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kStepId, kStepId},
{StatType::kConsumerType, int64_t{1}},
{StatType::kConsumerId, kStepId}});
CreateXEvent(&host_plane_builder, &executor_thread, "aaa:bbb", 30, 70);
xspace.add_hostnames(std::string(hostname));
}
TEST(ConvertXPlaneToOpStats, TestConvertMultiXSpacesToCombinedOpStats) {
static constexpr char kHost1[] = "host1";
static constexpr char kHost2[] = "host2";
auto xspace1 = std::make_unique<XSpace>();
auto xspace2 = std::make_unique<XSpace>();
BuildXSpaceForTest(*xspace1, kHost1);
BuildXSpaceForTest(*xspace2, kHost2);
std::vector<std::string> xspace_paths;
xspace_paths.push_back("host1.pb");
xspace_paths.push_back("host2.pb");
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(xspace1));
xspaces.push_back(std::move(xspace2));
auto session_snapshot_or =
SessionSnapshot::Create(std::move(xspace_paths), std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStatsOptions options;
options.generate_op_metrics_db = true;
options.generate_step_db = true;
OpStats combined_op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
options, &combined_op_stats))
<< "Failed to convert multi XSpace to OpStats";
ASSERT_EQ(combined_op_stats.host_op_metrics_db().metrics_db_size(), 2);
const auto& metric = combined_op_stats.host_op_metrics_db().metrics_db(1);
EXPECT_EQ(metric.name(), "aaa");
EXPECT_EQ(metric.category(), "bbb");
EXPECT_EQ(metric.self_time_ps(), 140);
ASSERT_EQ(combined_op_stats.step_db().step_sequence_size(), 1);
ASSERT_EQ(
combined_op_stats.step_db().step_sequence(0).step_info_per_core_size(),
2);
const auto& step_info_per_core =
combined_op_stats.step_db().step_sequence(0).step_info_per_core();
EXPECT_TRUE(step_info_per_core.contains(kDefaultGpuLocalCoreId));
EXPECT_TRUE(step_info_per_core.contains(1000 + kDefaultGpuLocalCoreId));
const auto& core_details_map = combined_op_stats.core_id_to_details();
EXPECT_EQ(kHost1, core_details_map.at(kDefaultGpuLocalCoreId).hostname());
EXPECT_EQ(kHost2,
core_details_map.at(1000 + kDefaultGpuLocalCoreId).hostname());
}
TEST(ConvertXPlaneToOpStats, RunEnvironmentExtractedFromTpuPlane) {
XSpace xspace;
for (int i : {0, 1, 2, 3}) {
GetOrCreateTpuXPlane(&xspace, i, "TPU V4", 0, 0);
}
OpStats op_stats = ConvertXSpaceToOpStats(xspace, OpStatsOptions());
EXPECT_EQ(op_stats.run_environment().device_type(), "TPU V4");
EXPECT_EQ(op_stats.run_environment().device_core_count(), 4);
}
TEST(ConvertXPlaneToOpStats, TpuPerfEnv) {
auto space = std::make_unique<XSpace>();
constexpr double kMaxError = 0.01;
constexpr int kClockRateKHz = 1530000;
constexpr int kCoreCount = 80;
constexpr uint64 kMemoryBandwidthBytesPerSecond =
uint64{900} * 1000 * 1000 * 1000;
constexpr int kComputeCapMajor = 7;
constexpr int kComputeCapMinor = 0;
constexpr double kDevCapPeakTeraflopsPerSecond = 141.0;
constexpr double kDevCapPeakHbmBwGigabytesPerSecond = 900.0;
XPlaneBuilder device_plane(GetOrCreateTpuXPlane(
space.get(), 0, "TPU V4",
kDevCapPeakTeraflopsPerSecond, kDevCapPeakHbmBwGigabytesPerSecond));
device_plane.AddStatValue(*device_plane.GetOrCreateStatMetadata("clock_rate"),
kClockRateKHz);
device_plane.AddStatValue(*device_plane.GetOrCreateStatMetadata("core_count"),
kCoreCount);
device_plane.AddStatValue(
*device_plane.GetOrCreateStatMetadata("memory_bandwidth"),
kMemoryBandwidthBytesPerSecond);
device_plane.AddStatValue(
*device_plane.GetOrCreateStatMetadata("compute_cap_major"),
kComputeCapMajor);
device_plane.AddStatValue(
*device_plane.GetOrCreateStatMetadata("compute_cap_minor"),
kComputeCapMinor);
OpStatsOptions options;
options.generate_op_metrics_db = true;
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(space));
auto session_snapshot_or =
SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStats op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
options, &op_stats));
const PerfEnv& perf_env = op_stats.perf_env();
EXPECT_NEAR(141, perf_env.peak_tera_flops_per_second(), kMaxError);
EXPECT_NEAR(
900,
perf_env.peak_bws_giga_bytes_per_second(MemBwType::MEM_BW_TYPE_HBM_RW),
kMaxError);
EXPECT_NEAR(156.67, perf_env.ridge_point(), kMaxError);
}
TEST(ConvertXPlaneToOpStats, TpuRunEnvironment) {
auto space = std::make_unique<XSpace>();
XPlaneBuilder device_plane1(
GetOrCreateTpuXPlane(space.get(), 0, "TPU V4", 0, 0));
XPlaneBuilder device_plane2(
GetOrCreateTpuXPlane(space.get(), 1, "TPU V4", 0, 0));
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(space));
auto session_snapshot_or =
SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStats op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(
session_snapshot_or.value(), OpStatsOptions(), &op_stats));
const RunEnvironment& run_env = op_stats.run_environment();
EXPECT_EQ("TPU V4", run_env.device_type());
EXPECT_EQ(1, run_env.host_count());
EXPECT_EQ(1, run_env.task_count());
EXPECT_EQ(2, run_env.device_core_count());
}
TEST(ConvertXPlaneToOpStats, TpuDeviceTraceToStepDb) {
auto space = std::make_unique<XSpace>();
constexpr double kDevCapPeakTeraflopsPerSecond = 141.0;
constexpr double kDevCapPeakHbmBwGigabytesPerSecond = 1000.0;
XPlaneBuilder xplane_builder(GetOrCreateTpuXPlane(
space.get(), 0, "TPU V4",
kDevCapPeakTeraflopsPerSecond, kDevCapPeakHbmBwGigabytesPerSecond));
XEventMetadata* event_metadata = xplane_builder.GetOrCreateEventMetadata(1);
event_metadata->set_name("op_name");
XStatsBuilder<XEventMetadata> stats(event_metadata, &xplane_builder);
stats.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kProgramId)),
1);
stats.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kSymbolId)),
1);
stats.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kSelfDurationPs)),
10);
stats.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kTfOp)),
"tf_op_name");
stats.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kHloCategory)),
"category");
XLineBuilder line = xplane_builder.GetOrCreateLine(1);
line.SetName(kTensorFlowOpLineName);
XEventBuilder event = line.AddEvent(*event_metadata);
event.SetOffsetNs(0);
event.SetDurationNs(10);
OpStatsOptions options;
options.generate_op_metrics_db = true;
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(space));
auto session_snapshot_or =
SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStats op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
options, &op_stats));
EXPECT_THAT(op_stats.device_op_metrics_db().metrics_db(),
UnorderedElementsAre(Property(&OpMetrics::name, "op_name"),
Property(&OpMetrics::name, "IDLE")));
}
TEST(ConvertXPlaneToOpStats, TpuMultiDeviceStepDbTest) {
auto space = std::make_unique<XSpace>();
XPlaneBuilder device_plane_builder1(
GetOrCreateTpuXPlane(space.get(), 0, "TPU V4", 0, 0));
XPlaneBuilder device_plane_builder2(
GetOrCreateTpuXPlane(space.get(), 1, "TPU V4", 0, 0));
device_plane_builder1.ReserveLines(1);
device_plane_builder2.ReserveLines(1);
XStatMetadata* kGroupId1 = device_plane_builder1.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kGroupId));
XLineBuilder line = device_plane_builder1.GetOrCreateLine(1);
line.SetName(kXlaOpLineName);
XEventMetadata* event_metadata =
device_plane_builder1.GetOrCreateEventMetadata(1);
event_metadata->set_name("Step 1");
XEventBuilder event_builder = line.AddEvent(*event_metadata);
event_builder.AddStatValue(*kGroupId1, 1);
event_builder.SetDurationNs(100);
event_builder.SetOffsetNs(100);
line = device_plane_builder2.GetOrCreateLine(1);
line.SetName(kXlaOpLineName);
XStatMetadata* kGroupId2 = device_plane_builder2.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kGroupId));
XEventMetadata* event_metadata2 =
device_plane_builder2.GetOrCreateEventMetadata(2);
event_metadata2->set_name("Step 1");
XEventBuilder event_builder2 = line.AddEvent(*event_metadata2);
event_builder2.AddStatValue(*kGroupId2, 1);
event_builder2.SetDurationNs(100);
event_builder2.SetOffsetNs(300);
XStatMetadata* kGroupId3 = device_plane_builder2.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kGroupId));
XEventMetadata* event_metadata3 =
device_plane_builder2.GetOrCreateEventMetadata(2);
event_metadata3->set_name("Step 2");
XEventBuilder event_builder3 = line.AddEvent(*event_metadata3);
event_builder3.AddStatValue(*kGroupId3, 2);
event_builder3.SetDurationNs(100);
event_builder3.SetOffsetNs(300);
OpStatsOptions options;
options.generate_op_metrics_db = true;
options.generate_step_db = true;
OpStats op_stats = ConvertXSpaceToOpStats(*space, options);
const StepDatabaseResult& step_db = op_stats.step_db();
EXPECT_EQ(step_db.step_sequence_size(), 1);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_op_stats.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_op_stats_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f6caa2d5-6866-43cc-a9a0-f63d605eeb98 | cpp | tensorflow/tensorflow | buffer_pool | third_party/xla/xla/tsl/profiler/utils/buffer_pool.cc | third_party/xla/xla/tsl/profiler/utils/buffer_pool_test.cc | #include "xla/tsl/profiler/utils/buffer_pool.h"
#include <ios>
#include "tsl/platform/logging.h"
#include "tsl/platform/mem.h"
#include "tsl/platform/mutex.h"
namespace tsl {
namespace profiler {
BufferPool::BufferPool(size_t buffer_size_in_bytes)
: buffer_size_in_bytes_(buffer_size_in_bytes) {}
BufferPool::~BufferPool() { DestroyAllBuffers(); }
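// Returns a previously reclaimed buffer if one is available; otherwise allocates a
// new 8-byte-aligned buffer of buffer_size_in_bytes_. Returns nullptr on failure.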
uint8_t* BufferPool::GetOrCreateBuffer() {
{
mutex_lock lock(buffers_mutex_);
if (!buffers_.empty()) {
uint8_t* buffer = buffers_.back();
buffers_.pop_back();
if (!buffer) {
LOG(ERROR) << "A reused buffer must not be null!";
return nullptr;
}
VLOG(3) << "Reused Buffer, buffer=" << std::hex
<< reinterpret_cast<uintptr_t>(buffer) << std::dec;
return buffer;
}
}
constexpr size_t kBufferAlignSize = 8;
uint8_t* buffer = reinterpret_cast<uint8_t*>(
port::AlignedMalloc(buffer_size_in_bytes_, kBufferAlignSize));
if (buffer == nullptr) {
LOG(WARNING) << "Buffer not allocated.";
return nullptr;
}
VLOG(3) << "Allocated Buffer, buffer=" << std::hex
<< reinterpret_cast<uintptr_t>(buffer) << std::dec
<< " size=" << buffer_size_in_bytes_;
return buffer;
}
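// Returns a buffer to the pool so a later GetOrCreateBuffer() call can reuse it
// instead of reallocating; the memory is only freed by DestroyAllBuffers().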
void BufferPool::ReclaimBuffer(uint8_t* buffer) {
mutex_lock lock(buffers_mutex_);
buffers_.push_back(buffer);
VLOG(3) << "Reclaimed Buffer, buffer=" << std::hex
<< reinterpret_cast<uintptr_t>(buffer) << std::dec;
}
void BufferPool::DestroyAllBuffers() {
mutex_lock lock(buffers_mutex_);
for (uint8_t* buffer : buffers_) {
VLOG(3) << "Freeing Buffer, buffer:" << std::hex
<< reinterpret_cast<uintptr_t>(buffer) << std::dec;
port::AlignedFree(buffer);
}
buffers_.clear();
}
size_t BufferPool::GetBufferSizeInBytes() const {
return buffer_size_in_bytes_;
}
}
} | #include "xla/tsl/profiler/utils/buffer_pool.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profiler {
namespace {
TEST(BufferPoolTest, GetOrCreateBufferAlloc) {
constexpr size_t kBufferSizeInBytes = 32 * 1024;
BufferPool buffer_pool(kBufferSizeInBytes);
uint8_t* first_buffer = buffer_pool.GetOrCreateBuffer();
EXPECT_NE(first_buffer, nullptr);
uint8_t* second_buffer = buffer_pool.GetOrCreateBuffer();
EXPECT_NE(second_buffer, first_buffer);
for (size_t idx = 0; idx < kBufferSizeInBytes; ++idx) {
first_buffer[idx] = 0xAB;
}
buffer_pool.ReclaimBuffer(first_buffer);
buffer_pool.ReclaimBuffer(second_buffer);
}
TEST(BufferPoolTest, GetOrCreateBufferReuse) {
constexpr size_t kBufferSizeInBytes = 32 * 1024;
BufferPool buffer_pool(kBufferSizeInBytes);
uint8_t* buffer = buffer_pool.GetOrCreateBuffer();
EXPECT_NE(buffer, nullptr);
buffer[0] = 0xFF;
uint8_t* previous_buffer = buffer;
buffer_pool.ReclaimBuffer(buffer);
uint8_t* reused_buffer = buffer_pool.GetOrCreateBuffer();
EXPECT_EQ(reused_buffer, previous_buffer);
for (size_t idx = 0; idx < kBufferSizeInBytes; ++idx) {
reused_buffer[idx] = 0xCD;
}
buffer_pool.ReclaimBuffer(reused_buffer);
}
TEST(BufferPoolTest, DestroyAllBuffers) {
constexpr size_t kBufferSizeInBytes = 32 * 1024;
BufferPool buffer_pool(kBufferSizeInBytes);
uint8_t* first_buffer = buffer_pool.GetOrCreateBuffer();
EXPECT_NE(first_buffer, nullptr);
buffer_pool.DestroyAllBuffers();
for (size_t idx = 0; idx < kBufferSizeInBytes; ++idx) {
first_buffer[idx] = 0xEF;
}
uint8_t* second_buffer = buffer_pool.GetOrCreateBuffer();
for (size_t idx = 0; idx < kBufferSizeInBytes; ++idx) {
second_buffer[idx] = 0xAB;
}
buffer_pool.ReclaimBuffer(first_buffer);
buffer_pool.ReclaimBuffer(second_buffer);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/buffer_pool.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/buffer_pool_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
67126d38-e555-4752-b7c6-e14f7ad54bc5 | cpp | google/cel-cpp | test_ast_helpers | checker/internal/test_ast_helpers.cc | checker/internal/test_ast_helpers_test.cc | #include "checker/internal/test_ast_helpers.h"
#include <memory>
#include <utility>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "common/ast.h"
#include "extensions/protobuf/ast_converters.h"
#include "internal/status_macros.h"
#include "parser/parser.h"
namespace cel::checker_internal {
using ::cel::extensions::CreateAstFromParsedExpr;
using ::google::api::expr::parser::Parse;
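// Parses a CEL expression and converts the parse result into the common Ast
// representation consumed by the checker tests.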
absl::StatusOr<std::unique_ptr<Ast>> MakeTestParsedAst(
absl::string_view expression) {
CEL_ASSIGN_OR_RETURN(auto parsed, Parse(expression));
return CreateAstFromParsedExpr(std::move(parsed));
}
} | #include "checker/internal/test_ast_helpers.h"
#include <memory>
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "base/ast_internal/ast_impl.h"
#include "common/ast.h"
#include "internal/testing.h"
namespace cel::checker_internal {
namespace {
using ::absl_testing::StatusIs;
using ::cel::ast_internal::AstImpl;
TEST(MakeTestParsedAstTest, Works) {
ASSERT_OK_AND_ASSIGN(std::unique_ptr<Ast> ast, MakeTestParsedAst("123"));
AstImpl& impl = AstImpl::CastFromPublicAst(*ast);
EXPECT_TRUE(impl.root_expr().has_const_expr());
}
TEST(MakeTestParsedAstTest, ForwardsParseError) {
EXPECT_THAT(MakeTestParsedAst("%123"),
StatusIs(absl::StatusCode::kInvalidArgument));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/checker/internal/test_ast_helpers.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/checker/internal/test_ast_helpers_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
87fe2bee-23db-45e0-a461-fab9ad7c0991 | cpp | tensorflow/tensorflow | null_request_cost_accessor | tensorflow/core/common_runtime/null_request_cost_accessor.cc | tensorflow/core/common_runtime/null_request_cost_accessor_test.cc | #include "tensorflow/core/common_runtime/null_request_cost_accessor.h"
namespace tensorflow {
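// The null accessor always reports no RequestCost; it is registered under the
// accessor name "null".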
RequestCost* NullRequestCostAccessor::GetRequestCost() const { return nullptr; }
REGISTER_REQUEST_COST_ACCESSOR("null", NullRequestCostAccessor);
} | #include "tensorflow/core/common_runtime/null_request_cost_accessor.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(NullRequestCostAccessorTest, Basic) {
NullRequestCostAccessor accessor;
EXPECT_EQ(accessor.GetRequestCost(), nullptr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/null_request_cost_accessor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/null_request_cost_accessor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6b0970d5-609b-4189-820c-8068b1d8b2d2 | cpp | tensorflow/tensorflow | memory_management | tensorflow/lite/delegates/gpu/common/memory_management.cc | tensorflow/lite/delegates/gpu/common/memory_management_test.cc | #include "tensorflow/lite/delegates/gpu/common/memory_management.h"
#include <cstddef>
#include <numeric>
#include <utility>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/memory_management/equality_assignment.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_breadth_assignment.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_size_assignment.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/greedy_in_order_assignment.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/min_cost_flow_assignment.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/naive_assignment.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/types.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace {
size_t TotalSize(const ObjectsAssignment<size_t>& assignment) {
return std::accumulate(assignment.object_sizes.begin(),
assignment.object_sizes.end(), static_cast<size_t>(0));
}
}
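// Lays the shared objects out back-to-back (in object-id order) and rewrites each
// tensor's object id as the byte offset of the object it was assigned to.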
OffsetsAssignment ObjectsToOffsets(
const ObjectsAssignment<size_t>& obj_assignment) {
size_t num_tensors = obj_assignment.object_ids.size();
size_t num_objects = obj_assignment.object_sizes.size();
  OffsetsAssignment result = {/*offsets=*/std::vector<size_t>(num_tensors),
                              /*total_size=*/0};
std::vector<size_t> ids_to_offset(num_objects);
for (size_t i = 0; i < num_objects; ++i) {
ids_to_offset[i] = result.total_size;
result.total_size += obj_assignment.object_sizes[i];
}
for (size_t i = 0; i < num_tensors; ++i) {
result.offsets[i] = ids_to_offset[obj_assignment.object_ids[i]];
}
return result;
}
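// Runs both greedy heuristics and keeps whichever assignment needs less total memory.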
absl::Status BestGreedy(
const std::vector<TensorUsageRecord<size_t>>& usage_records,
ObjectsAssignment<size_t>* assignment) {
RETURN_IF_ERROR(
GreedyBySizeDistPriorityAssignment(usage_records, assignment));
ObjectsAssignment<size_t> assignment_by_breadth;
if (GreedyByBreadthAssignment(usage_records, &assignment_by_breadth).ok() &&
TotalSize(assignment_by_breadth) < TotalSize(*assignment)) {
std::swap(*assignment, assignment_by_breadth);
}
return absl::OkStatus();
}
template <>
absl::Status AssignObjectsToTensors(
const std::vector<TensorUsageRecord<size_t>>& usage_records,
MemoryStrategy strategy, ObjectsAssignment<size_t>* assignment,
const UsageGraph* reallocation_graph) {
switch (strategy) {
case MemoryStrategy::NAIVE:
return NaiveAssignment(usage_records, assignment);
case MemoryStrategy::EQUALITY:
return EqualityAssignmentWithHash(usage_records, assignment);
case MemoryStrategy::GREEDY_IN_ORDER:
return GreedyInOrderAssignment(usage_records, assignment,
reallocation_graph);
case MemoryStrategy::GREEDY_BY_BREADTH:
return GreedyByBreadthAssignment(usage_records, assignment);
case MemoryStrategy::GREEDY_BY_SIZE:
return GreedyBySizeDistPriorityAssignment(usage_records, assignment);
case MemoryStrategy::GREEDY_BEST:
return BestGreedy(usage_records, assignment);
case MemoryStrategy::MINCOSTFLOW:
return MinCostFlowAssignment(usage_records, assignment);
default:
return absl::InternalError(
"MemoryStrategy is not supported with current tensor size type.");
}
return absl::OkStatus();
}
template <>
absl::Status AssignObjectsToTensors(
const std::vector<TensorUsageRecord<BHWC>>& usage_records,
MemoryStrategy strategy, ObjectsAssignment<BHWC>* assignment,
const UsageGraph* reallocation_graph) {
switch (strategy) {
case MemoryStrategy::NAIVE:
return NaiveAssignment(usage_records, assignment);
case MemoryStrategy::EQUALITY:
return EqualityAssignmentWithHash(usage_records, assignment);
default:
return absl::InternalError(
"MemoryStrategy is not supported with current tensor size type.");
}
return absl::OkStatus();
}
template <>
absl::Status AssignObjectsToTensors(
const std::vector<TensorUsageRecord<uint2>>& usage_records,
MemoryStrategy strategy, ObjectsAssignment<uint2>* assignment,
const UsageGraph* reallocation_graph) {
switch (strategy) {
case MemoryStrategy::NAIVE:
return NaiveAssignment(usage_records, assignment);
case MemoryStrategy::EQUALITY:
return EqualityAssignment(usage_records, assignment);
case MemoryStrategy::GREEDY_IN_ORDER:
return GreedyInOrderAssignmentMultidimensional(usage_records, assignment);
default:
return absl::InternalError(
"MemoryStrategy is not supported with current tensor size type.");
}
return absl::OkStatus();
}
template <>
absl::Status AssignObjectsToTensors(
const std::vector<TensorUsageRecord<uint3>>& usage_records,
MemoryStrategy strategy, ObjectsAssignment<uint3>* assignment,
const UsageGraph* reallocation_graph) {
switch (strategy) {
case MemoryStrategy::NAIVE:
return NaiveAssignment(usage_records, assignment);
case MemoryStrategy::EQUALITY:
return EqualityAssignment(usage_records, assignment);
case MemoryStrategy::GREEDY_IN_ORDER:
return GreedyInOrderAssignmentMultidimensional(usage_records, assignment);
default:
return absl::InternalError(
"MemoryStrategy is not supported with current tensor size type.");
}
return absl::OkStatus();
}
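// GREEDY_BY_SIZE computes offsets directly (honoring base_addr_align_bytes); every
// other strategy first assigns shared objects and then converts that assignment to
// offsets via ObjectsToOffsets().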
absl::Status AssignOffsetsToTensors(
const std::vector<TensorUsageRecord<size_t>>& usage_records,
const MemoryStrategy& strategy, OffsetsAssignment* assignment,
size_t base_addr_align_bytes, const UsageGraph* reallocation_graph) {
if (strategy == MemoryStrategy::GREEDY_BY_SIZE) {
return GreedyBySizeAssignment(usage_records, base_addr_align_bytes,
assignment);
}
ObjectsAssignment<size_t> objects_assignment;
RETURN_IF_ERROR(AssignObjectsToTensors(
usage_records, strategy, &objects_assignment, reallocation_graph));
*assignment = ObjectsToOffsets(objects_assignment);
return absl::OkStatus();
}
}
} | #include "tensorflow/lite/delegates/gpu/common/memory_management.h"
#include <cstddef>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/types.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace {
using ::testing::ElementsAre;
TEST(Model, EmptyAssignment) {
ObjectsAssignment<size_t> objects_assignment;
OffsetsAssignment result = ObjectsToOffsets(objects_assignment);
EXPECT_TRUE(result.offsets.empty());
EXPECT_EQ(result.total_size, 0);
}
TEST(Model, OneObjectAssignment) {
ObjectsAssignment<size_t> objects_assignment;
objects_assignment.object_sizes = {16};
objects_assignment.object_ids = {0};
OffsetsAssignment result = ObjectsToOffsets(objects_assignment);
EXPECT_EQ(result.total_size, 16);
EXPECT_THAT(result.offsets, ElementsAre(0));
objects_assignment.object_ids = {0, 0, 0};
result = ObjectsToOffsets(objects_assignment);
EXPECT_EQ(result.total_size, 16);
EXPECT_THAT(result.offsets, ElementsAre(0, 0, 0));
}
TEST(Model, ManyObjectsAssignment) {
ObjectsAssignment<size_t> objects_assignment;
objects_assignment.object_sizes = {16, 8, 32, 32, 4, 16};
objects_assignment.object_ids = {2, 0, 2, 1, 3, 3, 1, 5};
OffsetsAssignment result = ObjectsToOffsets(objects_assignment);
EXPECT_THAT(result.offsets, ElementsAre(24, 0, 24, 16, 56, 56, 16, 92));
}
TEST(Model, EmptyRecords) {
ObjectsAssignment<size_t> assignment;
ASSERT_TRUE(
AssignObjectsToTensors({}, MemoryStrategy::NAIVE, &assignment).ok());
EXPECT_TRUE(assignment.object_ids.empty());
EXPECT_TRUE(assignment.object_sizes.empty());
ASSERT_TRUE(
AssignObjectsToTensors({}, MemoryStrategy::EQUALITY, &assignment).ok());
EXPECT_TRUE(assignment.object_ids.empty());
EXPECT_TRUE(assignment.object_sizes.empty());
ASSERT_TRUE(
AssignObjectsToTensors({}, MemoryStrategy::GREEDY_IN_ORDER, &assignment)
.ok());
EXPECT_TRUE(assignment.object_ids.empty());
EXPECT_TRUE(assignment.object_sizes.empty());
ASSERT_TRUE(
AssignObjectsToTensors({}, MemoryStrategy::MINCOSTFLOW, &assignment)
.ok());
EXPECT_TRUE(assignment.object_ids.empty());
EXPECT_TRUE(assignment.object_sizes.empty());
ASSERT_TRUE(
AssignObjectsToTensors({}, MemoryStrategy::GREEDY_BY_BREADTH, &assignment)
.ok());
EXPECT_TRUE(assignment.object_ids.empty());
EXPECT_TRUE(assignment.object_sizes.empty());
ASSERT_TRUE(
AssignObjectsToTensors({}, MemoryStrategy::GREEDY_BY_SIZE, &assignment)
.ok());
EXPECT_TRUE(assignment.object_ids.empty());
EXPECT_TRUE(assignment.object_sizes.empty());
OffsetsAssignment offsets_assignment;
ASSERT_TRUE(AssignOffsetsToTensors({}, MemoryStrategy::GREEDY_BY_SIZE,
&offsets_assignment)
.ok());
EXPECT_TRUE(offsets_assignment.offsets.empty());
EXPECT_EQ(offsets_assignment.total_size, 0);
}
TEST(Model, OneRecord) {
std::vector<TensorUsageRecord<size_t>> usage_records{
{16, 0, 1}};
ObjectsAssignment<size_t> assignment;
ASSERT_TRUE(
AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(16));
ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY,
&assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(16));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_IN_ORDER, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(16));
ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::MINCOSTFLOW,
&assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(16));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_BY_BREADTH, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(16));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_BY_SIZE, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(16));
OffsetsAssignment offsets_assignment;
ASSERT_TRUE(AssignOffsetsToTensors(usage_records,
MemoryStrategy::GREEDY_BY_SIZE,
&offsets_assignment)
.ok());
EXPECT_THAT(offsets_assignment.offsets, ElementsAre(0));
EXPECT_EQ(offsets_assignment.total_size, 16);
}
TEST(Model, ChainRecords) {
std::vector<TensorUsageRecord<size_t>> usage_records{
{16, 0, 1},
{8, 1, 2},
{64, 2, 3},
{32, 3, 4},
{8, 4, 5},
};
ObjectsAssignment<size_t> assignment;
ASSERT_TRUE(
AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4));
EXPECT_THAT(assignment.object_sizes, ElementsAre(16, 8, 64, 32, 8));
ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY,
&assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 1));
EXPECT_THAT(assignment.object_sizes, ElementsAre(16, 8, 64, 32));
ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::MINCOSTFLOW,
&assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 1, 0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 32));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_IN_ORDER, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 1, 0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 32));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_BY_BREADTH, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 1, 0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 32));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_BY_SIZE, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 1, 0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 32));
OffsetsAssignment offsets_assignment;
ASSERT_TRUE(AssignOffsetsToTensors(usage_records,
MemoryStrategy::GREEDY_BY_SIZE,
&offsets_assignment)
.ok());
EXPECT_THAT(offsets_assignment.offsets, ElementsAre(0, 64, 0, 64, 0));
EXPECT_EQ(offsets_assignment.total_size, 96);
}
TEST(Model, ComplexRecords) {
std::vector<TensorUsageRecord<size_t>> usage_records{
{32, 0, 1},
{32, 1, 4},
{8, 2, 5},
{16, 3, 5},
{8, 4, 5},
{64, 5, 7},
{8, 6, 8},
{8, 7, 8},
{16, 8, 9}};
ObjectsAssignment<size_t> assignment;
ASSERT_TRUE(
AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8));
EXPECT_THAT(assignment.object_sizes,
ElementsAre(32, 32, 8, 16, 8, 64, 8, 8, 16));
ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY,
&assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 4, 2, 3));
EXPECT_THAT(assignment.object_sizes, ElementsAre(32, 32, 8, 16, 8, 64));
ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::MINCOSTFLOW,
&assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 0, 3, 1, 3, 2, 0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(32, 64, 8, 8));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_IN_ORDER, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 2, 3, 1, 3, 2, 0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(32, 64, 16, 8));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_BY_BREADTH, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 4, 2, 1, 3, 0, 2, 3, 1));
EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 16, 8, 8, 32));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_BY_SIZE, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(1, 0, 2, 1, 3, 0, 1, 2, 0));
EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 32, 8, 8));
OffsetsAssignment offsets_assignment;
ASSERT_TRUE(AssignOffsetsToTensors(usage_records,
MemoryStrategy::GREEDY_BY_SIZE,
&offsets_assignment)
.ok());
EXPECT_THAT(offsets_assignment.offsets,
ElementsAre(0, 32, 80, 64, 88, 0, 64, 72, 0));
EXPECT_EQ(offsets_assignment.total_size, 96);
}
TEST(Model, BHWCRecords) {
std::vector<TensorUsageRecord<BHWC>> usage_records{
{BHWC(1, 1, 2, 8), 0, 1},
{BHWC(1, 1, 2, 8), 1, 2},
{BHWC(1, 1, 1, 16), 2, 4},
{BHWC(1, 1, 2, 8), 3, 5},
{BHWC(1, 1, 8, 2), 4, 5},
{BHWC(1, 1, 2, 8), 5, 7},
{BHWC(1, 16, 1, 1), 6, 8},
{BHWC(16, 1, 1, 1), 7, 8},
{BHWC(1, 1, 1, 16), 8, 9}};
ObjectsAssignment<BHWC> assignment;
ASSERT_TRUE(
AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8));
EXPECT_THAT(
assignment.object_sizes,
ElementsAre(BHWC(1, 1, 2, 8), BHWC(1, 1, 2, 8), BHWC(1, 1, 1, 16),
BHWC(1, 1, 2, 8), BHWC(1, 1, 8, 2), BHWC(1, 1, 2, 8),
BHWC(1, 16, 1, 1), BHWC(16, 1, 1, 1), BHWC(1, 1, 1, 16)));
ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY,
&assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 1, 3, 0, 4, 5, 2));
EXPECT_THAT(
assignment.object_sizes,
ElementsAre(BHWC(1, 1, 2, 8), BHWC(1, 1, 2, 8), BHWC(1, 1, 1, 16),
BHWC(1, 1, 8, 2), BHWC(1, 16, 1, 1), BHWC(16, 1, 1, 1)));
}
TEST(Model, UInt2Records) {
std::vector<TensorUsageRecord<uint2>> usage_records{
{uint2(2, 8), 0, 1},
{uint2(2, 8), 1, 2},
{uint2(1, 12), 2, 4},
{uint2(2, 8), 3, 5},
{uint2(8, 2), 4, 5},
{uint2(2, 8), 5, 7},
{uint2(1, 8), 6, 8},
{uint2(2, 8), 7, 8},
{uint2(4, 1), 8, 9}};
ObjectsAssignment<uint2> assignment;
ASSERT_TRUE(
AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8));
EXPECT_THAT(assignment.object_sizes,
ElementsAre(uint2(2, 8), uint2(2, 8), uint2(1, 12), uint2(2, 8),
uint2(8, 2), uint2(2, 8), uint2(1, 8), uint2(2, 8),
uint2(4, 1)));
ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY,
&assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 0, 3, 1, 4, 0, 5));
EXPECT_THAT(assignment.object_sizes,
ElementsAre(uint2(2, 8), uint2(2, 8), uint2(1, 12), uint2(8, 2),
uint2(1, 8), uint2(4, 1)));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_IN_ORDER, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 0, 3, 1, 2, 0, 3));
EXPECT_THAT(assignment.object_sizes,
ElementsAre(uint2(2, 8), uint2(2, 8), uint2(1, 12), uint2(8, 2)));
}
TEST(Model, UInt3Records) {
std::vector<TensorUsageRecord<uint3>> usage_records{
{uint3(1, 2, 8), 0, 1},
{uint3(4, 3, 2), 1, 2},
{uint3(1, 1, 1), 2, 4},
{uint3(2, 4, 1), 3, 5},
{uint3(2, 2, 2), 4, 5},
{uint3(8, 1, 2), 5, 7},
{uint3(1, 2, 1), 6, 8},
{uint3(1, 1, 1), 7, 8},
{uint3(2, 2, 2), 8, 9}};
ObjectsAssignment<uint3> assignment;
ASSERT_TRUE(
AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8));
EXPECT_THAT(assignment.object_sizes,
ElementsAre(uint3(1, 2, 8), uint3(4, 3, 2), uint3(1, 1, 1),
uint3(2, 4, 1), uint3(2, 2, 2), uint3(8, 1, 2),
uint3(1, 2, 1), uint3(1, 1, 1), uint3(2, 2, 2)));
ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY,
&assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 6, 2, 4));
EXPECT_THAT(assignment.object_sizes,
ElementsAre(uint3(1, 2, 8), uint3(4, 3, 2), uint3(1, 1, 1),
uint3(2, 4, 1), uint3(2, 2, 2), uint3(8, 1, 2),
uint3(1, 2, 1)));
ASSERT_TRUE(AssignObjectsToTensors(
usage_records, MemoryStrategy::GREEDY_IN_ORDER, &assignment)
.ok());
EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 2, 1, 3, 2, 0, 1));
EXPECT_THAT(assignment.object_sizes,
ElementsAre(uint3(1, 2, 8), uint3(4, 3, 2), uint3(2, 4, 1),
uint3(8, 1, 2)));
}
TEST(Model, OffsetAssignmentWithAlignment) {
std::vector<TensorUsageRecord<size_t>> usage_records{
{16, 0, 1},
{8, 1, 2},
{64, 2, 3},
{32, 3, 4},
{8, 4, 5},
};
OffsetsAssignment offsets_assignment;
ASSERT_TRUE(AssignOffsetsToTensors(usage_records,
MemoryStrategy::GREEDY_BY_SIZE,
&offsets_assignment,
128)
.ok());
EXPECT_THAT(offsets_assignment.offsets, ElementsAre(0, 128, 0, 128, 0));
EXPECT_EQ(offsets_assignment.total_size, 160);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/memory_management.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/memory_management_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cbdbbcd9-531f-44a7-8c9c-6d54780b1a2c | cpp | google/quiche | web_transport_headers | quiche/web_transport/web_transport_headers.cc | quiche/web_transport/web_transport_headers_test.cc | #include "quiche/web_transport/web_transport_headers.h"
#include <array>
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "quiche/common/quiche_status_utils.h"
#include "quiche/common/structured_headers.h"
namespace webtransport {
namespace {
using ::quiche::structured_headers::Dictionary;
using ::quiche::structured_headers::DictionaryMember;
using ::quiche::structured_headers::Item;
using ::quiche::structured_headers::ItemTypeToString;
using ::quiche::structured_headers::List;
using ::quiche::structured_headers::ParameterizedItem;
using ::quiche::structured_headers::ParameterizedMember;
absl::Status CheckItemType(const ParameterizedItem& item,
Item::ItemType expected_type) {
if (item.item.Type() != expected_type) {
return absl::InvalidArgumentError(absl::StrCat(
"Expected all members to be of type ", ItemTypeToString(expected_type),
", found ", ItemTypeToString(item.item.Type()), " instead"));
}
return absl::OkStatus();
}
absl::Status CheckMemberType(const ParameterizedMember& member,
Item::ItemType expected_type) {
if (member.member_is_inner_list || member.member.size() != 1) {
return absl::InvalidArgumentError(absl::StrCat(
"Expected all members to be of type", ItemTypeToString(expected_type),
", found a nested list instead"));
}
return CheckItemType(member.member[0], expected_type);
}
ABSL_CONST_INIT std::array kInitHeaderFields{
std::make_pair("u", &WebTransportInitHeader::initial_unidi_limit),
std::make_pair("bl", &WebTransportInitHeader::initial_incoming_bidi_limit),
std::make_pair("br", &WebTransportInitHeader::initial_outgoing_bidi_limit),
};
}
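// Parses the subprotocol request header as a Structured Fields list whose members
// must all be bare tokens; parameters on a member are accepted and ignored.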
absl::StatusOr<std::vector<std::string>> ParseSubprotocolRequestHeader(
absl::string_view value) {
std::optional<List> parsed = quiche::structured_headers::ParseList(value);
if (!parsed.has_value()) {
return absl::InvalidArgumentError(
"Failed to parse the header as an sf-list");
}
std::vector<std::string> result;
result.reserve(parsed->size());
for (ParameterizedMember& member : *parsed) {
QUICHE_RETURN_IF_ERROR(CheckMemberType(member, Item::kTokenType));
result.push_back(std::move(member.member[0].item).TakeString());
}
return result;
}
absl::StatusOr<std::string> SerializeSubprotocolRequestHeader(
absl::Span<const std::string> subprotocols) {
for (const std::string& token : subprotocols) {
if (!quiche::structured_headers::IsValidToken(token)) {
return absl::InvalidArgumentError(absl::StrCat("Invalid token: ", token));
}
}
return absl::StrJoin(subprotocols, ", ");
}
absl::StatusOr<std::string> ParseSubprotocolResponseHeader(
absl::string_view value) {
std::optional<ParameterizedItem> parsed =
quiche::structured_headers::ParseItem(value);
if (!parsed.has_value()) {
return absl::InvalidArgumentError("Failed to parse sf-item");
}
QUICHE_RETURN_IF_ERROR(CheckItemType(*parsed, Item::kTokenType));
return std::move(parsed->item).TakeString();
}
absl::StatusOr<std::string> SerializeSubprotocolResponseHeader(
absl::string_view subprotocol) {
if (!quiche::structured_headers::IsValidToken(subprotocol)) {
return absl::InvalidArgumentError("Invalid token value supplied");
}
return std::string(subprotocol);
}
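// Parses the WebTransport-Init header as an sf-dictionary. Only the keys named in
// kInitHeaderFields are interpreted; their values must be non-negative integers.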
absl::StatusOr<WebTransportInitHeader> ParseInitHeader(
absl::string_view header) {
std::optional<Dictionary> parsed =
quiche::structured_headers::ParseDictionary(header);
if (!parsed.has_value()) {
return absl::InvalidArgumentError(
"Failed to parse WebTransport-Init header as an sf-dictionary");
}
WebTransportInitHeader output;
for (const auto& [field_name_a, field_value] : *parsed) {
for (const auto& [field_name_b, field_accessor] : kInitHeaderFields) {
if (field_name_a != field_name_b) {
continue;
}
QUICHE_RETURN_IF_ERROR(CheckMemberType(field_value, Item::kIntegerType));
int64_t value = field_value.member[0].item.GetInteger();
if (value < 0) {
return absl::InvalidArgumentError(
absl::StrCat("Received negative value for ", field_name_a));
}
output.*field_accessor = value;
}
}
return output;
}
absl::StatusOr<std::string> SerializeInitHeader(
const WebTransportInitHeader& header) {
std::vector<DictionaryMember> members;
members.reserve(kInitHeaderFields.size());
for (const auto& [field_name, field_accessor] : kInitHeaderFields) {
Item item(static_cast<int64_t>(header.*field_accessor));
members.push_back(std::make_pair(
field_name, ParameterizedMember({ParameterizedItem(item, {})}, false,
{})));
}
std::optional<std::string> result =
quiche::structured_headers::SerializeDictionary(
Dictionary(std::move(members)));
if (!result.has_value()) {
return absl::InternalError("Failed to serialize the dictionary");
}
return *std::move(result);
}
} | #include "quiche/web_transport/web_transport_headers.h"
#include "absl/status/status.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace webtransport {
namespace {
using ::quiche::test::IsOkAndHolds;
using ::quiche::test::StatusIs;
using ::testing::ElementsAre;
using ::testing::HasSubstr;
TEST(WebTransportHeaders, ParseSubprotocolRequestHeader) {
EXPECT_THAT(ParseSubprotocolRequestHeader("test"),
IsOkAndHolds(ElementsAre("test")));
EXPECT_THAT(ParseSubprotocolRequestHeader("moqt-draft01, moqt-draft02"),
IsOkAndHolds(ElementsAre("moqt-draft01", "moqt-draft02")));
EXPECT_THAT(ParseSubprotocolRequestHeader("moqt-draft01; a=b, moqt-draft02"),
IsOkAndHolds(ElementsAre("moqt-draft01", "moqt-draft02")));
EXPECT_THAT(ParseSubprotocolRequestHeader("moqt-draft01, moqt-draft02; a=b"),
IsOkAndHolds(ElementsAre("moqt-draft01", "moqt-draft02")));
EXPECT_THAT(ParseSubprotocolRequestHeader("\"test\""),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("found string instead")));
EXPECT_THAT(ParseSubprotocolRequestHeader("42"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("found integer instead")));
EXPECT_THAT(ParseSubprotocolRequestHeader("a, (b)"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("found a nested list instead")));
EXPECT_THAT(ParseSubprotocolRequestHeader("a, (b c)"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("found a nested list instead")));
EXPECT_THAT(ParseSubprotocolRequestHeader("foo, ?1, bar"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("found boolean instead")));
EXPECT_THAT(ParseSubprotocolRequestHeader("(a"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("parse the header as an sf-list")));
}
TEST(WebTransportHeaders, SerializeSubprotocolRequestHeader) {
EXPECT_THAT(SerializeSubprotocolRequestHeader({"test"}),
IsOkAndHolds("test"));
EXPECT_THAT(SerializeSubprotocolRequestHeader({"foo", "bar"}),
IsOkAndHolds("foo, bar"));
EXPECT_THAT(SerializeSubprotocolRequestHeader({"moqt-draft01", "a/b/c"}),
IsOkAndHolds("moqt-draft01, a/b/c"));
EXPECT_THAT(
SerializeSubprotocolRequestHeader({"abcd", "0123", "efgh"}),
StatusIs(absl::StatusCode::kInvalidArgument, "Invalid token: 0123"));
}
TEST(WebTransportHeader, ParseSubprotocolResponseHeader) {
EXPECT_THAT(ParseSubprotocolResponseHeader("foo"), IsOkAndHolds("foo"));
EXPECT_THAT(ParseSubprotocolResponseHeader("foo; a=b"), IsOkAndHolds("foo"));
EXPECT_THAT(
ParseSubprotocolResponseHeader("1234"),
StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("found integer")));
EXPECT_THAT(
ParseSubprotocolResponseHeader("(a"),
StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("parse sf-item")));
}
TEST(WebTransportHeader, SerializeSubprotocolResponseHeader) {
EXPECT_THAT(SerializeSubprotocolResponseHeader("foo"), IsOkAndHolds("foo"));
EXPECT_THAT(SerializeSubprotocolResponseHeader("moqt-draft01"),
IsOkAndHolds("moqt-draft01"));
EXPECT_THAT(SerializeSubprotocolResponseHeader("123abc"),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(WebTransportHeader, ParseInitHeader) {
WebTransportInitHeader expected_header;
expected_header.initial_unidi_limit = 100;
expected_header.initial_incoming_bidi_limit = 200;
expected_header.initial_outgoing_bidi_limit = 400;
EXPECT_THAT(ParseInitHeader("br=400, bl=200, u=100"),
IsOkAndHolds(expected_header));
EXPECT_THAT(ParseInitHeader("br=300, bl=200, u=100, br=400"),
IsOkAndHolds(expected_header));
EXPECT_THAT(ParseInitHeader("br=400, bl=200; foo=bar, u=100"),
IsOkAndHolds(expected_header));
EXPECT_THAT(ParseInitHeader("br=400, bl=200, u=100.0"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("found decimal instead")));
EXPECT_THAT(ParseInitHeader("br=400, bl=200, u=?1"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("found boolean instead")));
EXPECT_THAT(ParseInitHeader("br=400, bl=200, u=(a b)"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("found a nested list instead")));
EXPECT_THAT(ParseInitHeader("br=400, bl=200, u=:abcd:"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("found byte sequence instead")));
EXPECT_THAT(ParseInitHeader("br=400, bl=200, u=-1"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("negative value")));
EXPECT_THAT(ParseInitHeader("br=400, bl=200, u=18446744073709551615"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Failed to parse")));
}
TEST(WebTransportHeaders, SerializeInitHeader) {
EXPECT_THAT(SerializeInitHeader(WebTransportInitHeader{}),
IsOkAndHolds("u=0, bl=0, br=0"));
WebTransportInitHeader test_header;
test_header.initial_unidi_limit = 100;
test_header.initial_incoming_bidi_limit = 200;
test_header.initial_outgoing_bidi_limit = 400;
EXPECT_THAT(SerializeInitHeader(test_header),
IsOkAndHolds("u=100, bl=200, br=400"));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/web_transport/web_transport_headers.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/web_transport/web_transport_headers_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
45ed9816-f665-43c8-84d3-daee2ca567a2 | cpp | google/arolla | py_object_ptr_impl | py/arolla/py_utils/py_object_ptr_impl.h | py/arolla/py_utils/py_object_ptr_impl_test.cc | #ifndef THIRD_PARTY_PY_AROLLA_PY_UTILS_PY_OBJECT_PTR_IMPL_H_
#define THIRD_PARTY_PY_AROLLA_PY_UTILS_PY_OBJECT_PTR_IMPL_H_
#include <Python.h>
#include <cstddef>
#include <utility>
#include "absl/base/attributes.h"
namespace arolla::python::py_object_ptr_impl_internal {
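// CRTP base for smart pointers to Python objects: every refcount change is routed
// through Traits and performed while holding a Traits::GILGuardType.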
template <typename SelfType, typename Traits>
class BasePyObjectPtr {
using GILGuardType = typename Traits::GILGuardType;
using PyObjectType = typename Traits::PyObjectType;
ABSL_ATTRIBUTE_ALWAYS_INLINE static void inc_ref(PyObjectType* ptr) {
Traits().inc_ref(ptr);
}
ABSL_ATTRIBUTE_ALWAYS_INLINE static void dec_ref(PyObjectType* ptr) {
Traits().dec_ref(ptr);
}
public:
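  // Takes ownership of an existing reference without touching the refcount.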
[[nodiscard]] static SelfType Own(PyObjectType* ptr) {
SelfType result;
result.ptr_ = ptr;
return result;
}
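  // Creates a new reference: acquires the GIL and increments the refcount when
  // ptr is non-null.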
[[nodiscard]] static SelfType NewRef(PyObjectType* ptr) {
SelfType result;
if (ptr != nullptr) {
GILGuardType gil_guard;
result.ptr_ = ptr;
inc_ref(result.ptr_);
}
return result;
}
BasePyObjectPtr() = default;
~BasePyObjectPtr() { reset(); }
BasePyObjectPtr(const BasePyObjectPtr& other) {
if (other.ptr_ != nullptr) {
GILGuardType gil_guard;
ptr_ = other.ptr_;
inc_ref(ptr_);
}
}
BasePyObjectPtr& operator=(const BasePyObjectPtr& other) {
if (ptr_ != other.ptr_) {
GILGuardType gil_guard;
PyObjectType* old_ptr = std::exchange(ptr_, other.ptr_);
if (ptr_ != nullptr) {
inc_ref(ptr_);
}
if (old_ptr != nullptr) {
dec_ref(old_ptr);
}
}
return *this;
}
BasePyObjectPtr(BasePyObjectPtr&& other) : ptr_(other.release()) {}
BasePyObjectPtr& operator=(BasePyObjectPtr&& other) {
PyObjectType* old_ptr = std::exchange(ptr_, other.release());
if (old_ptr != nullptr) {
GILGuardType gil_guard;
dec_ref(old_ptr);
}
return *this;
}
[[nodiscard]] PyObjectType* get() const { return ptr_; }
bool operator==(std::nullptr_t) const { return ptr_ == nullptr; }
bool operator!=(std::nullptr_t) const { return ptr_ != nullptr; }
[[nodiscard]] PyObjectType* release() { return std::exchange(ptr_, nullptr); }
void reset() {
if (PyObjectType* old_ptr = release()) {
GILGuardType gil_guard;
dec_ref(old_ptr);
}
}
private:
PyObjectType* ptr_ = nullptr;
};
}
#endif | #include "py/arolla/py_utils/py_object_ptr_impl.h"
#include <Python.h>
#include <utility>
#include "gtest/gtest.h"
namespace arolla::python::py_object_ptr_impl_internal::testing {
namespace {
struct DummyGILGuard {
static int active;
static int total;
static void reset() {
active = 0;
total = 0;
}
DummyGILGuard() {
++active;
++total;
}
~DummyGILGuard() { --active; }
DummyGILGuard(const DummyGILGuard&) = delete;
DummyGILGuard& operator=(const DummyGILGuard&) = delete;
};
int DummyGILGuard::active;
int DummyGILGuard::total;
struct DummyPyObject {
int ref_counter = {1};
};
struct DummyTraits {
using GILGuardType = DummyGILGuard;
using PyObjectType = DummyPyObject;
void inc_ref(PyObjectType* ptr) { ++ptr->ref_counter; }
void dec_ref(PyObjectType* ptr) { --ptr->ref_counter; }
};
class DummyPyObjectPtr final
: public BasePyObjectPtr<DummyPyObjectPtr, DummyTraits> {
public:
DummyPyObjectPtr() = default;
DummyPyObjectPtr(const DummyPyObjectPtr&) = default;
DummyPyObjectPtr& operator=(const DummyPyObjectPtr&) = default;
DummyPyObjectPtr(DummyPyObjectPtr&&) = default;
DummyPyObjectPtr& operator=(DummyPyObjectPtr&&) = default;
};
class BasePyObjectPtrTest : public ::testing::Test {
protected:
void SetUp() override { DummyGILGuard::reset(); }
};
TEST_F(BasePyObjectPtrTest, OwnFactoryNull) {
{
auto ptr = DummyPyObjectPtr::Own(nullptr);
ASSERT_EQ(ptr.get(), nullptr);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
TEST_F(BasePyObjectPtrTest, OwnNullFactory) {
DummyPyObject obj;
{
auto ptr = DummyPyObjectPtr::Own(&obj);
ASSERT_EQ(ptr.get(), &obj);
ASSERT_EQ(obj.ref_counter, 1);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
ASSERT_EQ(obj.ref_counter, 0);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 1);
}
TEST_F(BasePyObjectPtrTest, NewRefNullFactory) {
{
auto ptr = DummyPyObjectPtr::NewRef(nullptr);
ASSERT_EQ(ptr.get(), nullptr);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
TEST_F(BasePyObjectPtrTest, NewRefFactory) {
DummyPyObject obj;
{
auto ptr = DummyPyObjectPtr::NewRef(&obj);
ASSERT_EQ(ptr.get(), &obj);
ASSERT_EQ(obj.ref_counter, 2);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 1);
}
ASSERT_EQ(obj.ref_counter, 1);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 2);
}
TEST_F(BasePyObjectPtrTest, DefaultCtor) {
{
DummyPyObjectPtr ptr;
ASSERT_EQ(ptr.get(), nullptr);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
TEST_F(BasePyObjectPtrTest, CopyNullCtor) {
{
DummyPyObjectPtr ptr1;
DummyPyObjectPtr ptr2 = ptr1;
ASSERT_EQ(ptr1.get(), nullptr);
ASSERT_EQ(ptr2.get(), nullptr);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
TEST_F(BasePyObjectPtrTest, CopyCtor) {
DummyPyObject obj;
{
DummyPyObjectPtr ptr1 = DummyPyObjectPtr::Own(&obj);
DummyPyObjectPtr ptr2 = ptr1;
ASSERT_EQ(ptr1.get(), &obj);
ASSERT_EQ(ptr2.get(), &obj);
ASSERT_EQ(obj.ref_counter, 2);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 1);
}
ASSERT_EQ(obj.ref_counter, 0);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 3);
}
TEST_F(BasePyObjectPtrTest, MoveNullCtor) {
{
DummyPyObjectPtr ptr1;
DummyPyObjectPtr ptr2 = std::move(ptr1);
ASSERT_EQ(ptr1.get(), nullptr);
ASSERT_EQ(ptr2.get(), nullptr);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
TEST_F(BasePyObjectPtrTest, MoveCtor) {
DummyPyObject obj;
{
DummyPyObjectPtr ptr1 = DummyPyObjectPtr::Own(&obj);
DummyPyObjectPtr ptr2 = std::move(ptr1);
ASSERT_EQ(ptr1.get(), nullptr);
ASSERT_EQ(ptr2.get(), &obj);
ASSERT_EQ(obj.ref_counter, 1);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
ASSERT_EQ(obj.ref_counter, 0);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 1);
}
TEST_F(BasePyObjectPtrTest, CopyOp_Null_Null) {
{
DummyPyObjectPtr ptr1;
DummyPyObjectPtr ptr2;
ptr1 = ptr2;
ASSERT_EQ(ptr1.get(), nullptr);
ASSERT_EQ(ptr2.get(), nullptr);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
TEST_F(BasePyObjectPtrTest, CopyOp_Null_Obj) {
DummyPyObject obj;
{
DummyPyObjectPtr ptr1;
DummyPyObjectPtr ptr2 = DummyPyObjectPtr::Own(&obj);
ptr1 = ptr2;
ASSERT_EQ(ptr1.get(), &obj);
ASSERT_EQ(ptr2.get(), &obj);
ASSERT_EQ(obj.ref_counter, 2);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 1);
}
ASSERT_EQ(obj.ref_counter, 0);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 3);
}
TEST_F(BasePyObjectPtrTest, CopyOp_Obj_Null) {
DummyPyObject obj;
{
DummyPyObjectPtr ptr1 = DummyPyObjectPtr::Own(&obj);
DummyPyObjectPtr ptr2;
ptr1 = ptr2;
ASSERT_EQ(ptr1.get(), nullptr);
ASSERT_EQ(ptr2.get(), nullptr);
ASSERT_EQ(obj.ref_counter, 0);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 1);
}
ASSERT_EQ(obj.ref_counter, 0);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 1);
}
TEST_F(BasePyObjectPtrTest, CopyOp_Obj1_Obj1) {
DummyPyObject obj1;
{
DummyPyObjectPtr ptr1 = DummyPyObjectPtr::Own(&obj1);
DummyPyObjectPtr ptr2 = DummyPyObjectPtr::NewRef(
&obj1);
ptr1 = ptr2;
ASSERT_EQ(ptr1.get(), &obj1);
ASSERT_EQ(ptr2.get(), &obj1);
ASSERT_EQ(obj1.ref_counter, 2);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 1);
}
ASSERT_EQ(obj1.ref_counter, 0);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 3);
}
TEST_F(BasePyObjectPtrTest, CopyOp_Obj1_Obj2) {
DummyPyObject obj1;
DummyPyObject obj2;
{
DummyPyObjectPtr ptr1 = DummyPyObjectPtr::Own(&obj1);
DummyPyObjectPtr ptr2 = DummyPyObjectPtr::Own(&obj2);
ptr1 = ptr2;
ASSERT_EQ(ptr1.get(), &obj2);
ASSERT_EQ(ptr2.get(), &obj2);
ASSERT_EQ(obj1.ref_counter, 0);
ASSERT_EQ(obj2.ref_counter, 2);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 1);
}
ASSERT_EQ(obj1.ref_counter, 0);
ASSERT_EQ(obj2.ref_counter, 0);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 3);
}
TEST_F(BasePyObjectPtrTest, MoveOp_Null_Null) {
{
DummyPyObjectPtr ptr1;
DummyPyObjectPtr ptr2;
ptr1 = std::move(ptr2);
ASSERT_EQ(ptr1.get(), nullptr);
ASSERT_EQ(ptr2.get(), nullptr);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
TEST_F(BasePyObjectPtrTest, MoveOp_Null_Obj) {
DummyPyObject obj;
{
DummyPyObjectPtr ptr1;
DummyPyObjectPtr ptr2 = DummyPyObjectPtr::Own(&obj);
ptr1 = std::move(ptr2);
ASSERT_EQ(ptr1.get(), &obj);
ASSERT_EQ(ptr2.get(), nullptr);
ASSERT_EQ(obj.ref_counter, 1);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
ASSERT_EQ(obj.ref_counter, 0);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 1);
}
TEST_F(BasePyObjectPtrTest, MoveOp_Obj_Null) {
DummyPyObject obj;
{
DummyPyObjectPtr ptr1 = DummyPyObjectPtr::Own(&obj);
DummyPyObjectPtr ptr2;
ptr1 = std::move(ptr2);
ASSERT_EQ(ptr1.get(), nullptr);
ASSERT_EQ(ptr2.get(), nullptr);
ASSERT_EQ(obj.ref_counter, 0);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 1);
}
ASSERT_EQ(obj.ref_counter, 0);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 1);
}
TEST_F(BasePyObjectPtrTest, MoveOp_Obj1_Obj1) {
DummyPyObject obj1;
{
DummyPyObjectPtr ptr1 = DummyPyObjectPtr::Own(&obj1);
DummyPyObjectPtr ptr2 = DummyPyObjectPtr::NewRef(
&obj1);
ptr1 = std::move(ptr2);
ASSERT_EQ(ptr1.get(), &obj1);
ASSERT_EQ(ptr2.get(), nullptr);
ASSERT_EQ(obj1.ref_counter, 1);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 2);
}
ASSERT_EQ(obj1.ref_counter, 0);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 3);
}
TEST_F(BasePyObjectPtrTest, MoveOp_Obj1_Obj2) {
DummyPyObject obj1;
DummyPyObject obj2;
{
DummyPyObjectPtr ptr1 = DummyPyObjectPtr::Own(&obj1);
DummyPyObjectPtr ptr2 = DummyPyObjectPtr::Own(&obj2);
ptr1 = std::move(ptr2);
ASSERT_EQ(ptr1.get(), &obj2);
ASSERT_EQ(ptr2.get(), nullptr);
ASSERT_EQ(obj1.ref_counter, 0);
ASSERT_EQ(obj2.ref_counter, 1);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 1);
}
ASSERT_EQ(obj1.ref_counter, 0);
ASSERT_EQ(obj2.ref_counter, 0);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 2);
}
TEST_F(BasePyObjectPtrTest, Equality) {
DummyPyObject obj;
const DummyPyObjectPtr null_ptr;
const DummyPyObjectPtr not_null_ptr = DummyPyObjectPtr::Own(&obj);
ASSERT_TRUE(null_ptr == nullptr);
ASSERT_FALSE(null_ptr != nullptr);
ASSERT_FALSE(not_null_ptr == nullptr);
ASSERT_TRUE(not_null_ptr != nullptr);
ASSERT_EQ(obj.ref_counter, 1);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
TEST_F(BasePyObjectPtrTest, Release) {
DummyPyObject obj;
{
DummyPyObjectPtr ptr = DummyPyObjectPtr::Own(&obj);
ASSERT_EQ(ptr.release(), &obj);
ASSERT_EQ(ptr.get(), nullptr);
ASSERT_EQ(obj.ref_counter, 1);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
ASSERT_EQ(obj.ref_counter, 1);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
TEST_F(BasePyObjectPtrTest, ResetNull) {
{
DummyPyObjectPtr ptr;
ptr.reset();
ASSERT_EQ(ptr.get(), nullptr);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 0);
}
TEST_F(BasePyObjectPtrTest, Reset) {
DummyPyObject obj;
{
DummyPyObjectPtr ptr = DummyPyObjectPtr::Own(&obj);
ptr.reset();
ASSERT_EQ(ptr.get(), nullptr);
ASSERT_EQ(obj.ref_counter, 0);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 1);
}
ASSERT_EQ(obj.ref_counter, 0);
ASSERT_EQ(DummyGILGuard::active, 0);
ASSERT_EQ(DummyGILGuard::total, 1);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/py/arolla/py_utils/py_object_ptr_impl.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/py/arolla/py_utils/py_object_ptr_impl_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
458e0be5-a6e4-4cfb-b5e8-68bac274c1b0 | cpp | google/cel-cpp | field_backed_map_impl | eval/public/containers/field_backed_map_impl.h | eval/public/containers/field_backed_map_impl_test.cc | #ifndef THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_CONTAINERS_FIELD_BACKED_MAP_IMPL_H_
#define THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_CONTAINERS_FIELD_BACKED_MAP_IMPL_H_
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
#include "absl/status/statusor.h"
#include "eval/public/cel_value.h"
#include "eval/public/containers/internal_field_backed_map_impl.h"
#include "eval/public/structs/cel_proto_wrapper.h"
namespace google::api::expr::runtime {
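// Exposes a protobuf map field as a CelMap, wrapping message values through
// CelProtoWrapper::InternalWrapMessage.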
class FieldBackedMapImpl : public internal::FieldBackedMapImpl {
public:
FieldBackedMapImpl(const google::protobuf::Message* message,
const google::protobuf::FieldDescriptor* descriptor,
google::protobuf::Arena* arena)
: internal::FieldBackedMapImpl(
message, descriptor, &CelProtoWrapper::InternalWrapMessage, arena) {
}
};
}
#endif | #include "eval/public/containers/field_backed_map_impl.h"
#include <array>
#include <limits>
#include <memory>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "eval/testutil/test_message.pb.h"
#include "internal/testing.h"
namespace google::api::expr::runtime {
namespace {
using ::absl_testing::StatusIs;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::UnorderedPointwise;
std::unique_ptr<FieldBackedMapImpl> CreateMap(const TestMessage* message,
const std::string& field,
google::protobuf::Arena* arena) {
const google::protobuf::FieldDescriptor* field_desc =
message->GetDescriptor()->FindFieldByName(field);
return std::make_unique<FieldBackedMapImpl>(message, field_desc, arena);
}
TEST(FieldBackedMapImplTest, BadKeyTypeTest) {
TestMessage message;
google::protobuf::Arena arena;
constexpr std::array<absl::string_view, 6> map_types = {
"int64_int32_map", "uint64_int32_map", "string_int32_map",
"bool_int32_map", "int32_int32_map", "uint32_uint32_map",
};
for (auto map_type : map_types) {
auto cel_map = CreateMap(&message, std::string(map_type), &arena);
auto result = cel_map->Has(CelValue::CreateNull());
EXPECT_FALSE(result.ok());
EXPECT_THAT(result.status().code(), Eq(absl::StatusCode::kInvalidArgument));
EXPECT_FALSE(result.ok());
EXPECT_THAT(result.status().code(), Eq(absl::StatusCode::kInvalidArgument));
auto lookup = (*cel_map)[CelValue::CreateNull()];
EXPECT_TRUE(lookup.has_value());
EXPECT_TRUE(lookup->IsError());
EXPECT_THAT(lookup->ErrorOrDie()->code(),
Eq(absl::StatusCode::kInvalidArgument));
}
}
TEST(FieldBackedMapImplTest, Int32KeyTest) {
TestMessage message;
auto field_map = message.mutable_int32_int32_map();
(*field_map)[0] = 1;
(*field_map)[1] = 2;
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "int32_int32_map", &arena);
EXPECT_EQ((*cel_map)[CelValue::CreateInt64(0)]->Int64OrDie(), 1);
EXPECT_EQ((*cel_map)[CelValue::CreateInt64(1)]->Int64OrDie(), 2);
EXPECT_TRUE(cel_map->Has(CelValue::CreateInt64(1)).value_or(false));
EXPECT_FALSE((*cel_map)[CelValue::CreateInt64(3)].has_value());
EXPECT_FALSE(cel_map->Has(CelValue::CreateInt64(3)).value_or(true));
}
TEST(FieldBackedMapImplTest, Int32KeyOutOfRangeTest) {
TestMessage message;
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "int32_int32_map", &arena);
auto result = cel_map->Has(
CelValue::CreateInt64(std::numeric_limits<int32_t>::max() + 1L));
EXPECT_THAT(result.status(),
StatusIs(absl::StatusCode::kOutOfRange, HasSubstr("overflow")));
result = cel_map->Has(
CelValue::CreateInt64(std::numeric_limits<int32_t>::lowest() - 1L));
EXPECT_FALSE(result.ok());
EXPECT_THAT(result.status().code(), Eq(absl::StatusCode::kOutOfRange));
}
TEST(FieldBackedMapImplTest, Int64KeyTest) {
TestMessage message;
auto field_map = message.mutable_int64_int32_map();
(*field_map)[0] = 1;
(*field_map)[1] = 2;
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "int64_int32_map", &arena);
EXPECT_EQ((*cel_map)[CelValue::CreateInt64(0)]->Int64OrDie(), 1);
EXPECT_EQ((*cel_map)[CelValue::CreateInt64(1)]->Int64OrDie(), 2);
EXPECT_TRUE(cel_map->Has(CelValue::CreateInt64(1)).value_or(false));
EXPECT_EQ((*cel_map)[CelValue::CreateInt64(3)].has_value(), false);
}
TEST(FieldBackedMapImplTest, BoolKeyTest) {
TestMessage message;
auto field_map = message.mutable_bool_int32_map();
(*field_map)[false] = 1;
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "bool_int32_map", &arena);
EXPECT_EQ((*cel_map)[CelValue::CreateBool(false)]->Int64OrDie(), 1);
EXPECT_TRUE(cel_map->Has(CelValue::CreateBool(false)).value_or(false));
EXPECT_EQ((*cel_map)[CelValue::CreateBool(true)].has_value(), false);
(*field_map)[true] = 2;
EXPECT_EQ((*cel_map)[CelValue::CreateBool(true)]->Int64OrDie(), 2);
}
TEST(FieldBackedMapImplTest, Uint32KeyTest) {
TestMessage message;
auto field_map = message.mutable_uint32_uint32_map();
(*field_map)[0] = 1u;
(*field_map)[1] = 2u;
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "uint32_uint32_map", &arena);
EXPECT_EQ((*cel_map)[CelValue::CreateUint64(0)]->Uint64OrDie(), 1UL);
EXPECT_EQ((*cel_map)[CelValue::CreateUint64(1)]->Uint64OrDie(), 2UL);
EXPECT_TRUE(cel_map->Has(CelValue::CreateUint64(1)).value_or(false));
EXPECT_EQ((*cel_map)[CelValue::CreateUint64(3)].has_value(), false);
EXPECT_EQ(cel_map->Has(CelValue::CreateUint64(3)).value_or(true), false);
}
TEST(FieldBackedMapImplTest, Uint32KeyOutOfRangeTest) {
TestMessage message;
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "uint32_uint32_map", &arena);
auto result = cel_map->Has(
CelValue::CreateUint64(std::numeric_limits<uint32_t>::max() + 1UL));
EXPECT_FALSE(result.ok());
EXPECT_THAT(result.status().code(), Eq(absl::StatusCode::kOutOfRange));
}
TEST(FieldBackedMapImplTest, Uint64KeyTest) {
TestMessage message;
auto field_map = message.mutable_uint64_int32_map();
(*field_map)[0] = 1;
(*field_map)[1] = 2;
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "uint64_int32_map", &arena);
EXPECT_EQ((*cel_map)[CelValue::CreateUint64(0)]->Int64OrDie(), 1);
EXPECT_EQ((*cel_map)[CelValue::CreateUint64(1)]->Int64OrDie(), 2);
EXPECT_TRUE(cel_map->Has(CelValue::CreateUint64(1)).value_or(false));
EXPECT_EQ((*cel_map)[CelValue::CreateUint64(3)].has_value(), false);
}
TEST(FieldBackedMapImplTest, StringKeyTest) {
TestMessage message;
auto field_map = message.mutable_string_int32_map();
(*field_map)["test0"] = 1;
(*field_map)["test1"] = 2;
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "string_int32_map", &arena);
std::string test0 = "test0";
std::string test1 = "test1";
std::string test_notfound = "test_notfound";
EXPECT_EQ((*cel_map)[CelValue::CreateString(&test0)]->Int64OrDie(), 1);
EXPECT_EQ((*cel_map)[CelValue::CreateString(&test1)]->Int64OrDie(), 2);
EXPECT_TRUE(cel_map->Has(CelValue::CreateString(&test1)).value_or(false));
EXPECT_EQ((*cel_map)[CelValue::CreateString(&test_notfound)].has_value(),
false);
}
TEST(FieldBackedMapImplTest, EmptySizeTest) {
TestMessage message;
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "string_int32_map", &arena);
EXPECT_EQ(cel_map->size(), 0);
}
TEST(FieldBackedMapImplTest, RepeatedAddTest) {
TestMessage message;
auto field_map = message.mutable_string_int32_map();
(*field_map)["test0"] = 1;
(*field_map)["test1"] = 2;
(*field_map)["test0"] = 3;
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "string_int32_map", &arena);
EXPECT_EQ(cel_map->size(), 2);
}
TEST(FieldBackedMapImplTest, KeyListTest) {
TestMessage message;
auto field_map = message.mutable_string_int32_map();
std::vector<std::string> keys;
std::vector<std::string> keys1;
for (int i = 0; i < 100; i++) {
keys.push_back(absl::StrCat("test", i));
(*field_map)[keys.back()] = i;
}
google::protobuf::Arena arena;
auto cel_map = CreateMap(&message, "string_int32_map", &arena);
const CelList* key_list = cel_map->ListKeys().value();
EXPECT_EQ(key_list->size(), 100);
for (int i = 0; i < key_list->size(); i++) {
keys1.push_back(std::string((*key_list)[i].StringOrDie().value()));
}
EXPECT_THAT(keys, UnorderedPointwise(Eq(), keys1));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/containers/field_backed_map_impl.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/containers/field_backed_map_impl_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
3237f930-4e86-4d74-98a8-7753c2534621 | cpp | tensorflow/tensorflow | summary_audio_op | tensorflow/core/kernels/summary_audio_op.cc | tensorflow/core/kernels/summary_audio_op_test.cc | #include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/wav/wav_io.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
class SummaryAudioOp : public OpKernel {
public:
explicit SummaryAudioOp(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("max_outputs", &max_outputs_));
OP_REQUIRES(context, max_outputs_ > 0,
errors::InvalidArgument("max_outputs must be > 0"));
has_sample_rate_attr_ =
context->GetAttr("sample_rate", &sample_rate_attr_).ok();
}
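  // Expects audio shaped [batch, frames, channels] (3-D) or [batch, frames]
  // (2-D, treated as one channel) and emits up to max_outputs_ WAV-encoded
  // Summary::Audio values, one per batch element.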
void Compute(OpKernelContext* c) override {
const Tensor& tag = c->input(0);
const Tensor& tensor = c->input(1);
OP_REQUIRES(c, TensorShapeUtils::IsScalar(tag.shape()),
errors::InvalidArgument("Tag must be a scalar"));
OP_REQUIRES(c, tensor.dims() >= 2 && tensor.dims() <= 3,
errors::InvalidArgument("Tensor must be 3-D or 2-D, got: ",
tensor.shape().DebugString()));
const string& base_tag = tag.scalar<tstring>()();
float sample_rate = sample_rate_attr_;
if (!has_sample_rate_attr_) {
const Tensor& sample_rate_tensor = c->input(2);
OP_REQUIRES(c,
sample_rate_tensor.IsAligned() &&
sample_rate_tensor.NumElements() == 1,
errors::InvalidArgument(
"sample_rate must be rank-0 or contain a single value"));
sample_rate = sample_rate_tensor.scalar<float>()();
}
OP_REQUIRES(c, sample_rate > 0.0f,
errors::InvalidArgument("sample_rate must be > 0"));
const int batch_size = tensor.dim_size(0);
const int64_t length_frames = tensor.dim_size(1);
const int64_t num_channels =
tensor.dims() == 2 ? 1 : tensor.dim_size(tensor.dims() - 1);
Summary s;
const int N = std::min<int>(max_outputs_, batch_size);
for (int i = 0; i < N; ++i) {
Summary::Value* v = s.add_value();
if (max_outputs_ > 1) {
v->set_tag(strings::StrCat(base_tag, "/audio/", i));
} else {
v->set_tag(strings::StrCat(base_tag, "/audio"));
}
Summary::Audio* sa = v->mutable_audio();
sa->set_sample_rate(sample_rate);
sa->set_num_channels(num_channels);
sa->set_length_frames(length_frames);
sa->set_content_type("audio/wav");
auto values =
tensor.shaped<float, 3>({batch_size, length_frames, num_channels});
const float* data =
tensor.NumElements() == 0 ? nullptr : &values(i, 0, 0);
size_t sample_rate_truncated = lrintf(sample_rate);
if (sample_rate_truncated == 0) {
sample_rate_truncated = 1;
}
OP_REQUIRES_OK(c, wav::EncodeAudioAsS16LEWav(
data, sample_rate_truncated, num_channels,
length_frames, sa->mutable_encoded_audio_string()));
}
Tensor* summary_tensor = nullptr;
OP_REQUIRES_OK(c, c->allocate_output(0, TensorShape({}), &summary_tensor));
CHECK(SerializeToTString(s, &summary_tensor->scalar<tstring>()()));
}
private:
int max_outputs_;
bool has_sample_rate_attr_;
float sample_rate_attr_;
};
REGISTER_KERNEL_BUILDER(Name("AudioSummaryV2").Device(DEVICE_CPU),
SummaryAudioOp);
REGISTER_KERNEL_BUILDER(Name("AudioSummary").Device(DEVICE_CPU),
SummaryAudioOp);
} | #include <functional>
#include <memory>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/histogram/histogram.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
static void EXPECT_SummaryMatches(const Summary& actual,
const string& expected_str) {
Summary expected;
CHECK(protobuf::TextFormat::ParseFromString(expected_str, &expected));
EXPECT_EQ(expected.DebugString(), actual.DebugString());
}
class SummaryAudioOpTest : public OpsTestBase {
protected:
void MakeOp(const int max_outputs) {
TF_ASSERT_OK(NodeDefBuilder("myop", "AudioSummaryV2")
.Input(FakeInput())
.Input(FakeInput())
.Input(FakeInput())
.Attr("max_outputs", max_outputs)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
void CheckAndRemoveEncodedAudio(Summary* summary) {
for (int i = 0; i < summary->value_size(); ++i) {
Summary::Value* value = summary->mutable_value(i);
ASSERT_TRUE(value->has_audio()) << "No audio for value: " << value->tag();
ASSERT_FALSE(value->audio().encoded_audio_string().empty())
<< "No encoded_audio_string for value: " << value->tag();
if (VLOG_IS_ON(2)) {
TF_CHECK_OK(WriteStringToFile(
Env::Default(), strings::StrCat("/tmp/", value->tag(), ".wav"),
value->audio().encoded_audio_string()));
}
value->mutable_audio()->clear_encoded_audio_string();
}
}
};
TEST_F(SummaryAudioOpTest, Basic3D) {
const float kSampleRate = 44100.0f;
const int kMaxOutputs = 3;
MakeOp(kMaxOutputs);
AddInputFromArray<tstring>(TensorShape({}), {"tag"});
AddInputFromArray<float>(TensorShape({4, 2, 2}),
{0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0});
AddInputFromArray<float>(TensorShape({}), {kSampleRate});
TF_ASSERT_OK(RunOpKernel());
Tensor* out_tensor = GetOutput(0);
ASSERT_EQ(0, out_tensor->dims());
Summary summary;
ParseProtoUnlimited(&summary, out_tensor->scalar<tstring>()());
CheckAndRemoveEncodedAudio(&summary);
EXPECT_SummaryMatches(summary, R"(
value { tag: 'tag/audio/0'
audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 2
length_frames: 2 } }
value { tag: 'tag/audio/1'
audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 2
length_frames: 2 } }
value { tag: 'tag/audio/2'
audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 2
length_frames: 2 } }
)");
}
TEST_F(SummaryAudioOpTest, Basic2D) {
const float kSampleRate = 44100.0f;
const int kMaxOutputs = 3;
MakeOp(kMaxOutputs);
AddInputFromArray<tstring>(TensorShape({}), {"tag"});
AddInputFromArray<float>(TensorShape({4, 4}),
{0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0});
AddInputFromArray<float>(TensorShape({}), {kSampleRate});
TF_ASSERT_OK(RunOpKernel());
Tensor* out_tensor = GetOutput(0);
ASSERT_EQ(0, out_tensor->dims());
Summary summary;
ParseProtoUnlimited(&summary, out_tensor->scalar<tstring>()());
CheckAndRemoveEncodedAudio(&summary);
EXPECT_SummaryMatches(summary, R"(
value { tag: 'tag/audio/0'
audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1
length_frames: 4 } }
value { tag: 'tag/audio/1'
audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1
length_frames: 4 } }
value { tag: 'tag/audio/2'
audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1
length_frames: 4 } }
)");
}
TEST_F(SummaryAudioOpTest, ZeroLength) {
const float kSampleRate = 44100.0f;
const int kMaxOutputs = 3;
MakeOp(kMaxOutputs);
AddInputFromArray<tstring>(TensorShape({}), {"tag"});
AddInputFromArray<float>(TensorShape({4, 0}), {});
AddInputFromArray<float>(TensorShape({}), {kSampleRate});
TF_ASSERT_OK(RunOpKernel());
Tensor* out_tensor = GetOutput(0);
ASSERT_EQ(0, out_tensor->dims());
Summary summary;
ParseProtoUnlimited(&summary, out_tensor->scalar<tstring>()());
CheckAndRemoveEncodedAudio(&summary);
EXPECT_SummaryMatches(summary, R"(
value { tag: 'tag/audio/0'
audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1
length_frames: 0 } }
value { tag: 'tag/audio/1'
audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1
length_frames: 0 } }
value { tag: 'tag/audio/2'
audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1
length_frames: 0 } }
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/summary_audio_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/summary_audio_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7d0c65d5-2cb4-4db6-a272-128a033d924c | cpp | tensorflow/tensorflow | dynamic_partition_op | tensorflow/compiler/tf2xla/kernels/dynamic_partition_op.cc | tensorflow/core/kernels/dynamic_partition_op_test.cc | #include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "tensorflow/compiler/tf2xla/literal_util.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/comparison_util.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/comparators.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/ops_util.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/tpu/tpu_defs.h"
namespace tensorflow {
namespace {
class DynamicPartitionOp : public XlaOpKernel {
public:
explicit DynamicPartitionOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("num_partitions", &num_partitions_));
}
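  // Returns the number of elements of `input` equal to `target`: compares
  // element-wise, converts the boolean mask to S32, and sums it with a
  // ReduceAll.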
xla::XlaOp CountS32(XlaOpKernelContext* ctx, xla::XlaOp input,
int64_t target) {
xla::XlaOp equal_dim =
xla::Compare(input, xla::ConstantR0<int32>(ctx->builder(), target), {},
xla::ComparisonDirection::kEq);
xla::XlaOp casted = xla::ConvertElementType(equal_dim, xla::S32);
return xla::ReduceAll(
casted, xla::Zero(ctx->builder(), xla::S32),
xla::CreateScalarAddComputation(xla::S32, ctx->builder()));
}
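  // Partitions flattened 1-D data: sorts (partition id, value) pairs by
  // partition id, derives each partition's length and start offset, pads the
  // sorted data so every slice can use the static upper-bound size, and
  // returns the per-partition slices together with their lengths.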
std::pair<std::vector<xla::XlaOp>, std::vector<xla::XlaOp>>
DynamicPartition1D(XlaOpKernelContext* ctx, xla::XlaOp data_1d,
xla::XlaOp partitions_1d, const xla::Shape& data_1d_shape,
const xla::Shape& partition_1d_shape) {
int64_t input_count = data_1d_shape.dimensions(0);
std::vector<xla::XlaOp> to_sort = {partitions_1d, data_1d};
std::vector<xla::PrimitiveType> types_to_sort = {
partition_1d_shape.element_type(), data_1d_shape.element_type()};
    xla::XlaOp sorted = xla::Sort(
        to_sort, xla::CreateScalarLtComputation(types_to_sort, ctx->builder()),
        /*dimension=*/0,
        /*is_stable=*/true);
xla::XlaOp sorted_partitions = xla::GetTupleElement(sorted, 0);
xla::XlaOp sorted_data = xla::GetTupleElement(sorted, 1);
std::vector<xla::XlaOp> partition_length(num_partitions_);
std::vector<xla::XlaOp> partition_start(num_partitions_);
xla::XlaOp count_so_far = xla::Zero(ctx->builder(), xla::S32);
for (int64_t i = 0; i < num_partitions_; ++i) {
xla::XlaOp count = CountS32(ctx, sorted_partitions, i);
partition_length[i] = count;
partition_start[i] = count_so_far;
count_so_far = xla::Add(count_so_far, count);
}
xla::PaddingConfig padding_config;
auto* dims = padding_config.add_dimensions();
dims->set_edge_padding_low(0);
dims->set_edge_padding_high(input_count);
dims->set_interior_padding(0);
auto padded_data =
xla::Pad(sorted_data, xla::Zero(ctx->builder(), ctx->input_xla_type(0)),
padding_config);
std::vector<xla::XlaOp> output(num_partitions_);
for (int64_t i = 0; i < num_partitions_; ++i) {
padded_data = xla::RemoveDynamicDimension(padded_data, 0);
auto sliced =
xla::DynamicSlice(padded_data, {partition_start[i]}, {input_count});
output[i] = sliced;
}
return {output, partition_length};
}
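  // Flattens data and partitions to 1-D, reuses DynamicPartition1D, then
  // reshapes each result to its bound shape. Dimension 0 of every output gets
  // either a static size (when the partition tensor is a compile-time
  // constant) or a dynamic size derived from the per-partition counts.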
void Compile(XlaOpKernelContext* ctx) override {
xla::Shape data_shape = ctx->InputXlaShape(0).value();
xla::Shape partition_shape = ctx->InputXlaShape(1).value();
xla::XlaOp data = ctx->Input(0);
xla::XlaOp partitions = ctx->Input(1);
std::vector<int64_t> partitions_static;
bool partitions_are_static =
ctx->ConstantInputReshapedToIntVector(1, &partitions_static).ok();
if (data_shape.rank() > partition_shape.rank()) {
std::vector<int64_t> broadcasted_dims;
auto rank = partition_shape.rank();
broadcasted_dims.reserve(rank);
for (int64_t i = 0; i < rank; ++i) {
broadcasted_dims.push_back(i);
}
partitions = xla::BroadcastInDim(partitions, data_shape.dimensions(),
broadcasted_dims);
}
std::vector<int64_t> output_shape_bound_dims;
output_shape_bound_dims.push_back(
xla::ShapeUtil::ElementsIn(partition_shape));
int64_t count_diff = 1;
for (int64_t i = partition_shape.rank(); i < data_shape.rank(); ++i) {
output_shape_bound_dims.push_back(data_shape.dimensions(i));
count_diff *= data_shape.dimensions(i);
}
int64_t input_count = xla::ShapeUtil::ElementsIn(data_shape);
auto data_1d = xla::Reshape(data, {input_count});
auto partitions_1d = xla::Reshape(partitions, {input_count});
xla::Shape data_1d_shape =
xla::ShapeUtil::MakeShape(data_shape.element_type(), {input_count});
xla::Shape partitions_1d_shape = xla::ShapeUtil::MakeShape(
partition_shape.element_type(), {input_count});
std::vector<xla::XlaOp> output, partition_length;
std::tie(output, partition_length) = DynamicPartition1D(
ctx, data_1d, partitions_1d, data_1d_shape, partitions_1d_shape);
for (int64_t i = 0; i < num_partitions_; ++i) {
auto reshape = xla::Reshape(output[i], output_shape_bound_dims);
if (partitions_are_static) {
int64_t size = absl::c_count(partitions_static, i);
ctx->SetOutput(i, xla::SliceInDim(reshape, 0, size, 1, 0));
} else {
xla::XlaOp length;
if (count_diff != 0) {
length = xla::Div(partition_length[i],
xla::ConstantR0<int32>(ctx->builder(), count_diff));
} else {
length = CountS32(ctx, ctx->Input(1), i);
}
ctx->SetOutput(i, xla::SetDimensionSize(reshape, length, 0));
}
}
}
private:
int64_t num_partitions_;
};
REGISTER_XLA_OP(Name("DynamicPartition"), DynamicPartitionOp);
}
} | #include <functional>
#include <memory>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
class DynamicPartitionOpTest : public OpsTestBase {
protected:
void MakeOp() {
TF_ASSERT_OK(NodeDefBuilder("myop", "DynamicPartition")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("num_partitions", 4)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(DynamicPartitionOpTest, Simple_OneD) {
MakeOp();
AddInputFromArray<float>(TensorShape({6}), {0, 13, 2, 39, 4, 17});
AddInputFromArray<int32>(TensorShape({6}), {0, 0, 2, 3, 2, 1});
TF_ASSERT_OK(RunOpKernel());
{
Tensor expected(allocator(), DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&expected, {0, 13});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
{
Tensor expected(allocator(), DT_FLOAT, TensorShape({1}));
test::FillValues<float>(&expected, {17});
test::ExpectTensorEqual<float>(expected, *GetOutput(1));
}
{
Tensor expected(allocator(), DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&expected, {2, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(2));
}
{
Tensor expected(allocator(), DT_FLOAT, TensorShape({1}));
test::FillValues<float>(&expected, {39});
test::ExpectTensorEqual<float>(expected, *GetOutput(3));
}
}
TEST_F(DynamicPartitionOpTest, Simple_TwoD) {
MakeOp();
AddInputFromArray<float>(
TensorShape({6, 3}),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17});
AddInputFromArray<int32>(TensorShape({6}), {0, 0, 2, 3, 2, 1});
TF_ASSERT_OK(RunOpKernel());
{
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected, {0, 1, 2, 3, 4, 5});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
{
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3}));
test::FillValues<float>(&expected, {15, 16, 17});
test::ExpectTensorEqual<float>(expected, *GetOutput(1));
}
{
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected, {6, 7, 8, 12, 13, 14});
test::ExpectTensorEqual<float>(expected, *GetOutput(2));
}
{
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3}));
test::FillValues<float>(&expected, {9, 10, 11});
test::ExpectTensorEqual<float>(expected, *GetOutput(3));
}
}
TEST_F(DynamicPartitionOpTest, SomeOutputsEmpty) {
MakeOp();
AddInputFromArray<float>(TensorShape({6}), {0, 13, 2, 39, 4, 17});
AddInputFromArray<int32>(TensorShape({6}), {0, 0, 2, 2, 0, 2});
TF_ASSERT_OK(RunOpKernel());
TensorShape empty_one_dim;
empty_one_dim.AddDim(0);
Tensor expected_empty(allocator(), DT_FLOAT, empty_one_dim);
{
Tensor expected(allocator(), DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected, {0, 13, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
{
test::ExpectTensorEqual<float>(expected_empty, *GetOutput(1));
}
{
Tensor expected(allocator(), DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected, {2, 39, 17});
test::ExpectTensorEqual<float>(expected, *GetOutput(2));
}
{
test::ExpectTensorEqual<float>(expected_empty, *GetOutput(3));
}
}
TEST_F(DynamicPartitionOpTest, Error_IndexOutOfRange) {
MakeOp();
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14});
AddInputFromArray<int32>(TensorShape({5}), {0, 2, 99, 2, 2});
Status s = RunOpKernel();
EXPECT_TRUE(
absl::StrContains(s.ToString(), "partitions[2] = 99 is not in [0, 4)"))
<< s;
}
Node* DynamicPartitionNode(Graph* g, Node* in0, Node* in1, int num_partitions) {
Node* ret;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "DynamicPartition")
.Input(in0)
.Input(in1)
.Attr("num_partitions", num_partitions)
.Finalize(g, &ret));
return ret;
}
template <typename T>
static Graph* DynamicPartition(int num_partitions, int dim) {
Graph* g = new Graph(OpRegistry::Global());
const int kRows = ((128 << 20) / sizeof(T)) / dim;
Tensor data(DataTypeToEnum<T>::value, TensorShape({kRows, dim}));
data.flat<T>().setRandom();
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
Tensor partitions(DT_INT32, TensorShape({kRows}));
for (int i = 0; i < kRows; i++) {
partitions.flat<int32>()(i) = rnd.Uniform(num_partitions);
}
DynamicPartitionNode(g, test::graph::Constant(g, data),
test::graph::Constant(g, partitions), num_partitions);
return g;
}
#define BM_DYNAMIC_PARTITION(DEVICE, T, num) \
static void BM_##DEVICE##_dynpart_##T##_##num( \
::testing::benchmark::State& state) { \
const int dim = state.range(0); \
\
const int64_t items = ((128 << 20) / sizeof(T)); \
    test::Benchmark(#DEVICE, DynamicPartition<T>(num, dim),                  \
                    /*old_benchmark_api=*/false)                             \
.Run(state); \
const int64_t tot = static_cast<int64_t>(state.iterations()) * items; \
state.SetItemsProcessed(tot); \
} \
BENCHMARK(BM_##DEVICE##_dynpart_##T##_##num)->UseRealTime()->Arg(1)->Arg(256)
BM_DYNAMIC_PARTITION(cpu, float, 2);
BM_DYNAMIC_PARTITION(cpu, float, 100);
BM_DYNAMIC_PARTITION(cpu, double, 2);
BM_DYNAMIC_PARTITION(cpu, double, 100);
BM_DYNAMIC_PARTITION(cpu, complex64, 2);
BM_DYNAMIC_PARTITION(cpu, complex64, 100);
BM_DYNAMIC_PARTITION(gpu, int32, 2);
BM_DYNAMIC_PARTITION(gpu, int32, 100);
BM_DYNAMIC_PARTITION(gpu, int64, 2);
BM_DYNAMIC_PARTITION(gpu, int64, 100);
BM_DYNAMIC_PARTITION(gpu, float, 2);
BM_DYNAMIC_PARTITION(gpu, float, 100);
BM_DYNAMIC_PARTITION(gpu, double, 2);
BM_DYNAMIC_PARTITION(gpu, double, 100);
BM_DYNAMIC_PARTITION(gpu, complex64, 2);
BM_DYNAMIC_PARTITION(gpu, complex64, 100);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/dynamic_partition_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/dynamic_partition_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0b8a4b1c-0153-4d36-9ce3-49d6b157c049 | cpp | google/cel-cpp | bool_value | common/values/bool_value.cc | common/values/bool_value_test.cc | #include <cstddef>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "common/any.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/value.h"
#include "internal/serialize.h"
#include "internal/status_macros.h"
namespace cel {
namespace {
std::string BoolDebugString(bool value) { return value ? "true" : "false"; }
}
std::string BoolValue::DebugString() const {
return BoolDebugString(NativeValue());
}
absl::StatusOr<Json> BoolValue::ConvertToJson(AnyToJsonConverter&) const {
return NativeValue();
}
absl::Status BoolValue::SerializeTo(AnyToJsonConverter&,
absl::Cord& value) const {
return internal::SerializeBoolValue(NativeValue(), value);
}
absl::Status BoolValue::Equal(ValueManager&, const Value& other,
Value& result) const {
if (auto other_value = As<BoolValue>(other); other_value.has_value()) {
result = BoolValue{NativeValue() == other_value->NativeValue()};
return absl::OkStatus();
}
result = BoolValue{false};
return absl::OkStatus();
}
absl::StatusOr<Value> BoolValue::Equal(ValueManager& value_manager,
const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
return result;
}
} | #include <sstream>
#include "absl/hash/hash.h"
#include "absl/strings/cord.h"
#include "absl/types/optional.h"
#include "common/any.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/native_type.h"
#include "common/value.h"
#include "common/value_testing.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::testing::An;
using ::testing::Ne;
using BoolValueTest = common_internal::ThreadCompatibleValueTest<>;
TEST_P(BoolValueTest, Kind) {
EXPECT_EQ(BoolValue(true).kind(), BoolValue::kKind);
EXPECT_EQ(Value(BoolValue(true)).kind(), BoolValue::kKind);
}
TEST_P(BoolValueTest, DebugString) {
{
std::ostringstream out;
out << BoolValue(true);
EXPECT_EQ(out.str(), "true");
}
{
std::ostringstream out;
out << Value(BoolValue(true));
EXPECT_EQ(out.str(), "true");
}
}
TEST_P(BoolValueTest, ConvertToJson) {
EXPECT_THAT(BoolValue(false).ConvertToJson(value_manager()),
IsOkAndHolds(Json(false)));
}
TEST_P(BoolValueTest, NativeTypeId) {
EXPECT_EQ(NativeTypeId::Of(BoolValue(true)), NativeTypeId::For<BoolValue>());
EXPECT_EQ(NativeTypeId::Of(Value(BoolValue(true))),
NativeTypeId::For<BoolValue>());
}
TEST_P(BoolValueTest, InstanceOf) {
EXPECT_TRUE(InstanceOf<BoolValue>(BoolValue(true)));
EXPECT_TRUE(InstanceOf<BoolValue>(Value(BoolValue(true))));
}
TEST_P(BoolValueTest, Cast) {
EXPECT_THAT(Cast<BoolValue>(BoolValue(true)), An<BoolValue>());
EXPECT_THAT(Cast<BoolValue>(Value(BoolValue(true))), An<BoolValue>());
}
TEST_P(BoolValueTest, As) {
EXPECT_THAT(As<BoolValue>(Value(BoolValue(true))), Ne(absl::nullopt));
}
TEST_P(BoolValueTest, HashValue) {
EXPECT_EQ(absl::HashOf(BoolValue(true)), absl::HashOf(true));
}
TEST_P(BoolValueTest, Equality) {
EXPECT_NE(BoolValue(false), true);
EXPECT_NE(true, BoolValue(false));
EXPECT_NE(BoolValue(false), BoolValue(true));
}
TEST_P(BoolValueTest, LessThan) {
EXPECT_LT(BoolValue(false), true);
EXPECT_LT(false, BoolValue(true));
EXPECT_LT(BoolValue(false), BoolValue(true));
}
INSTANTIATE_TEST_SUITE_P(
BoolValueTest, BoolValueTest,
::testing::Combine(::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting)),
BoolValueTest::ToString);
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/bool_value.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/bool_value_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
6e8c1f82-a063-4a88-b2c7-c93d33ccf681 | cpp | google/quiche | qpack_required_insert_count | quiche/quic/core/qpack/qpack_required_insert_count.cc | quiche/quic/core/qpack/qpack_required_insert_count_test.cc | #include "quiche/quic/core/qpack/qpack_required_insert_count.h"
#include <limits>
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
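// Encodes Required Insert Count for the header block prefix: zero is encoded
// as zero, any other value as (value mod 2 * MaxEntries) + 1, matching the
// QPACK wire encoding.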
uint64_t QpackEncodeRequiredInsertCount(uint64_t required_insert_count,
uint64_t max_entries) {
if (required_insert_count == 0) {
return 0;
}
return required_insert_count % (2 * max_entries) + 1;
}
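// Reverses the encoding above: reconstructs the full Required Insert Count by
// picking the candidate closest to the decoder's total number of inserts, and
// fails when the encoded value cannot correspond to any valid count.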
bool QpackDecodeRequiredInsertCount(uint64_t encoded_required_insert_count,
uint64_t max_entries,
uint64_t total_number_of_inserts,
uint64_t* required_insert_count) {
if (encoded_required_insert_count == 0) {
*required_insert_count = 0;
return true;
}
QUICHE_DCHECK_LE(max_entries, std::numeric_limits<uint64_t>::max() / 32);
if (encoded_required_insert_count > 2 * max_entries) {
return false;
}
*required_insert_count = encoded_required_insert_count - 1;
QUICHE_DCHECK_LT(*required_insert_count,
std::numeric_limits<uint64_t>::max() / 16);
uint64_t current_wrapped = total_number_of_inserts % (2 * max_entries);
QUICHE_DCHECK_LT(current_wrapped, std::numeric_limits<uint64_t>::max() / 16);
if (current_wrapped >= *required_insert_count + max_entries) {
*required_insert_count += 2 * max_entries;
} else if (current_wrapped + max_entries < *required_insert_count) {
current_wrapped += 2 * max_entries;
}
if (*required_insert_count >
std::numeric_limits<uint64_t>::max() - total_number_of_inserts) {
return false;
}
*required_insert_count += total_number_of_inserts;
if (current_wrapped >= *required_insert_count) {
return false;
}
*required_insert_count -= current_wrapped;
return true;
}
} | #include "quiche/quic/core/qpack/qpack_required_insert_count.h"
#include "absl/base/macros.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
namespace {
TEST(QpackRequiredInsertCountTest, QpackEncodeRequiredInsertCount) {
EXPECT_EQ(0u, QpackEncodeRequiredInsertCount(0, 0));
EXPECT_EQ(0u, QpackEncodeRequiredInsertCount(0, 8));
EXPECT_EQ(0u, QpackEncodeRequiredInsertCount(0, 1024));
EXPECT_EQ(2u, QpackEncodeRequiredInsertCount(1, 8));
EXPECT_EQ(5u, QpackEncodeRequiredInsertCount(20, 8));
EXPECT_EQ(7u, QpackEncodeRequiredInsertCount(106, 10));
}
struct {
uint64_t required_insert_count;
uint64_t max_entries;
uint64_t total_number_of_inserts;
} kTestData[] = {
{0, 0, 0},
{0, 100, 0},
{0, 100, 500},
{15, 100, 25},
{20, 100, 10},
{90, 100, 110},
{234, 100, 180},
{5678, 100, 5701},
{401, 100, 500},
{600, 100, 500}};
TEST(QpackRequiredInsertCountTest, QpackDecodeRequiredInsertCount) {
for (size_t i = 0; i < ABSL_ARRAYSIZE(kTestData); ++i) {
const uint64_t required_insert_count = kTestData[i].required_insert_count;
const uint64_t max_entries = kTestData[i].max_entries;
const uint64_t total_number_of_inserts =
kTestData[i].total_number_of_inserts;
if (required_insert_count != 0) {
ASSERT_LT(0u, max_entries) << i;
ASSERT_LT(total_number_of_inserts, required_insert_count + max_entries)
<< i;
ASSERT_LE(required_insert_count, total_number_of_inserts + max_entries)
<< i;
}
uint64_t encoded_required_insert_count =
QpackEncodeRequiredInsertCount(required_insert_count, max_entries);
uint64_t decoded_required_insert_count = required_insert_count + 1;
EXPECT_TRUE(QpackDecodeRequiredInsertCount(
encoded_required_insert_count, max_entries, total_number_of_inserts,
&decoded_required_insert_count))
<< i;
EXPECT_EQ(decoded_required_insert_count, required_insert_count) << i;
}
}
struct {
uint64_t encoded_required_insert_count;
uint64_t max_entries;
uint64_t total_number_of_inserts;
} kInvalidTestData[] = {
{1, 0, 0},
{9, 0, 0},
{1, 10, 2},
{18, 10, 2},
{400, 100, 500},
{601, 100, 500}};
TEST(QpackRequiredInsertCountTest, DecodeRequiredInsertCountError) {
for (size_t i = 0; i < ABSL_ARRAYSIZE(kInvalidTestData); ++i) {
uint64_t decoded_required_insert_count = 0;
EXPECT_FALSE(QpackDecodeRequiredInsertCount(
kInvalidTestData[i].encoded_required_insert_count,
kInvalidTestData[i].max_entries,
kInvalidTestData[i].total_number_of_inserts,
&decoded_required_insert_count))
<< i;
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_required_insert_count.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_required_insert_count_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
6abeb031-0f9b-4d31-b673-75daf31fd982 | cpp | google/glog | stacktrace | src/stacktrace.cc | src/stacktrace_unittest.cc | #include "stacktrace.h"
#if defined(STACKTRACE_H)
# include STACKTRACE_H
#endif | #include "stacktrace.h"
#include <cstdio>
#include <cstdlib>
#include "base/commandlineflags.h"
#include "config.h"
#include "glog/logging.h"
#include "utilities.h"
#ifdef HAVE_EXECINFO_BACKTRACE_SYMBOLS
# include <execinfo.h>
#endif
#ifdef HAVE_STACKTRACE
const int BACKTRACE_STEPS = 6;
struct AddressRange {
const void *start, *end;
};
AddressRange expected_range[BACKTRACE_STEPS];
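// With GCC label-as-value support, the macros below record each Check*
// function's [start, end) address range (widened by the observed return
// address) so the captured stack frames can be validated; other compilers
// fall back to a fixed 256-byte window starting at the function address.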
# if __GNUC__
# define INIT_ADDRESS_RANGE(fn, start_label, end_label, prange) \
do { \
(prange)->start = &&start_label; \
(prange)->end = &&end_label; \
CHECK_LT((prange)->start, (prange)->end); \
} while (0)
# define DECLARE_ADDRESS_LABEL(a_label) \
a_label: \
do { \
__asm__ __volatile__(""); \
} while (0)
# define ADJUST_ADDRESS_RANGE_FROM_RA(prange) \
do { \
void* ra = __builtin_return_address(0); \
CHECK_LT((prange)->start, ra); \
if (ra > (prange)->end) { \
printf("Adjusting range from %p..%p to %p..%p\n", (prange)->start, \
(prange)->end, (prange)->start, ra); \
(prange)->end = ra; \
} \
} while (0)
# else
# define INIT_ADDRESS_RANGE(fn, start_label, end_label, prange) \
do { \
(prange)->start = reinterpret_cast<const void*>(&fn); \
(prange)->end = reinterpret_cast<const char*>(&fn) + 256; \
} while (0)
# define DECLARE_ADDRESS_LABEL(a_label) \
do { \
} while (0)
# define ADJUST_ADDRESS_RANGE_FROM_RA(prange) \
do { \
} while (0)
# endif
static void CheckRetAddrIsInFunction(void* ret_addr,
const AddressRange& range) {
CHECK_GE(ret_addr, range.start);
CHECK_LE(ret_addr, range.end);
}
# if defined(__clang__)
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wgnu-label-as-value"
# endif
void ATTRIBUTE_NOINLINE CheckStackTrace(int);
static void ATTRIBUTE_NOINLINE CheckStackTraceLeaf() {
const int STACK_LEN = 10;
void* stack[STACK_LEN];
int size;
ADJUST_ADDRESS_RANGE_FROM_RA(&expected_range[1]);
INIT_ADDRESS_RANGE(CheckStackTraceLeaf, start, end, &expected_range[0]);
DECLARE_ADDRESS_LABEL(start);
size = google::GetStackTrace(stack, STACK_LEN, 0);
printf("Obtained %d stack frames.\n", size);
CHECK_GE(size, 1);
CHECK_LE(size, STACK_LEN);
if (true) {
# ifdef HAVE_EXECINFO_BACKTRACE_SYMBOLS
char** strings = backtrace_symbols(stack, size);
printf("Obtained %d stack frames.\n", size);
for (int i = 0; i < size; i++) {
printf("%s %p\n", strings[i], stack[i]);
}
union {
void (*p1)(int);
void* p2;
} p = {&CheckStackTrace};
printf("CheckStackTrace() addr: %p\n", p.p2);
free(strings);
# endif
}
for (int i = 0; i < BACKTRACE_STEPS; i++) {
printf("Backtrace %d: expected: %p..%p actual: %p ... ", i,
expected_range[i].start, expected_range[i].end, stack[i]);
fflush(stdout);
CheckRetAddrIsInFunction(stack[i], expected_range[i]);
printf("OK\n");
}
DECLARE_ADDRESS_LABEL(end);
}
static void ATTRIBUTE_NOINLINE CheckStackTrace4(int i) {
ADJUST_ADDRESS_RANGE_FROM_RA(&expected_range[2]);
INIT_ADDRESS_RANGE(CheckStackTrace4, start, end, &expected_range[1]);
DECLARE_ADDRESS_LABEL(start);
for (int j = i; j >= 0; j--) {
CheckStackTraceLeaf();
}
DECLARE_ADDRESS_LABEL(end);
}
static void ATTRIBUTE_NOINLINE CheckStackTrace3(int i) {
ADJUST_ADDRESS_RANGE_FROM_RA(&expected_range[3]);
INIT_ADDRESS_RANGE(CheckStackTrace3, start, end, &expected_range[2]);
DECLARE_ADDRESS_LABEL(start);
for (int j = i; j >= 0; j--) {
CheckStackTrace4(j);
}
DECLARE_ADDRESS_LABEL(end);
}
static void ATTRIBUTE_NOINLINE CheckStackTrace2(int i) {
ADJUST_ADDRESS_RANGE_FROM_RA(&expected_range[4]);
INIT_ADDRESS_RANGE(CheckStackTrace2, start, end, &expected_range[3]);
DECLARE_ADDRESS_LABEL(start);
for (int j = i; j >= 0; j--) {
CheckStackTrace3(j);
}
DECLARE_ADDRESS_LABEL(end);
}
static void ATTRIBUTE_NOINLINE CheckStackTrace1(int i) {
ADJUST_ADDRESS_RANGE_FROM_RA(&expected_range[5]);
INIT_ADDRESS_RANGE(CheckStackTrace1, start, end, &expected_range[4]);
DECLARE_ADDRESS_LABEL(start);
for (int j = i; j >= 0; j--) {
CheckStackTrace2(j);
}
DECLARE_ADDRESS_LABEL(end);
}
# ifndef __GNUC__
static
# endif
void ATTRIBUTE_NOINLINE
CheckStackTrace(int i) {
INIT_ADDRESS_RANGE(CheckStackTrace, start, end, &expected_range[5]);
DECLARE_ADDRESS_LABEL(start);
for (int j = i; j >= 0; j--) {
CheckStackTrace1(j);
}
DECLARE_ADDRESS_LABEL(end);
}
# if defined(__clang__)
# pragma clang diagnostic pop
# endif
int main(int, char** argv) {
FLAGS_logtostderr = true;
google::InitGoogleLogging(argv[0]);
CheckStackTrace(0);
printf("PASS\n");
return 0;
}
#else
int main() {
# ifdef GLOG_BAZEL_BUILD
printf("HAVE_STACKTRACE is expected to be defined in Bazel tests\n");
exit(EXIT_FAILURE);
# endif
printf("PASS (no stacktrace support)\n");
return 0;
}
#endif | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/stacktrace.cc | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/stacktrace_unittest.cc | de309c08c05382fee0792380de7df1bd65332da2 |
c60900b9-5117-4a35-921c-e7e73ded1358 | cpp | tensorflow/tensorflow | xplane_to_dcn_collective_stats | tensorflow/core/profiler/convert/xplane_to_dcn_collective_stats.cc | tensorflow/core/profiler/convert/xplane_to_dcn_collective_stats_test.cc | #include "tensorflow/core/profiler/convert/xplane_to_dcn_collective_stats.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/match.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/profiler/convert/dcn_slack_analysis_combiner.h"
#include "tensorflow/core/profiler/convert/repository.h"
#include "tensorflow/core/profiler/convert/xspace_to_dcn_slack_analysis.h"
#include "tensorflow/core/profiler/protobuf/dcn_slack_analysis.pb.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
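// An XSpace is treated as containing DCN collective stats when any event
// metadata on the host-threads plane has a name starting with "MegaScale:".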
bool HasDcnCollectiveStatsInXSpace(const XSpace& xspace) {
if (const tensorflow::profiler::XPlane* xplane = FindPlaneWithName(
xspace, tensorflow::profiler::kHostThreadsPlaneName);
xplane != nullptr) {
for (const auto& [_, metadata] : xplane->event_metadata()) {
if (absl::StartsWith(metadata.name(), "MegaScale:")) {
return true;
}
}
}
return false;
}
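// Runs the DCN slack analysis for every host, caches one proto per host plus
// a combined proto under kAllHostsIdentifier, and returns true. If any host
// lacks DCN stats, writes an empty proto under kNoHostIdentifier and returns
// false instead.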
absl::StatusOr<bool> GetDcnCollectiveStatsFromMultiXSpaceAndSaveToFile(
const SessionSnapshot& session_snapshot) {
DcnSlackAnalysisCombiner combiner;
for (int idx = 0; idx < session_snapshot.XSpaceSize(); idx++) {
std::string hostname = session_snapshot.GetHostname(idx);
TF_ASSIGN_OR_RETURN(std::unique_ptr<XSpace> xspace,
session_snapshot.GetXSpace(idx));
if (!HasDcnCollectiveStatsInXSpace(*xspace)) {
DcnSlackAnalysis dcnSlackAnalysis;
TF_RETURN_IF_ERROR(WriteBinaryProto(session_snapshot,
StoredDataType::DCN_COLLECTIVE_STATS,
kNoHostIdentifier, dcnSlackAnalysis));
return false;
}
DcnSlackAnalysis dcnSlackAnalysis =
ConvertXSpaceToDcnSlackAnalysis(*xspace, nullptr, nullptr);
TF_RETURN_IF_ERROR(WriteBinaryProto(session_snapshot,
StoredDataType::DCN_COLLECTIVE_STATS,
hostname, dcnSlackAnalysis));
combiner.Combine(dcnSlackAnalysis);
}
DcnSlackAnalysis dcnSlackAnalysis = combiner.Finalize();
TF_RETURN_IF_ERROR(WriteBinaryProto(session_snapshot,
StoredDataType::DCN_COLLECTIVE_STATS,
kAllHostsIdentifier, dcnSlackAnalysis));
return true;
}
}
absl::StatusOr<bool> HasDcnCollectiveStatsInMultiXSpace(
const SessionSnapshot& session_snapshot) {
std::pair<bool, std::string> hasCacheFile;
TF_ASSIGN_OR_RETURN(hasCacheFile, session_snapshot.HasCacheFile(
StoredDataType::DCN_COLLECTIVE_STATS));
if (!hasCacheFile.first) {
for (int idx = 0; idx < session_snapshot.XSpaceSize(); idx++) {
std::string hostname = session_snapshot.GetHostname(idx);
TF_ASSIGN_OR_RETURN(std::unique_ptr<XSpace> xspace,
session_snapshot.GetXSpace(idx));
if (HasDcnCollectiveStatsInXSpace(*xspace)) {
return true;
}
}
return false;
}
if (hasCacheFile.second.empty()) {
return false;
} else {
return true;
}
}
absl::StatusOr<bool> ConvertMultiXSpaceToDcnCollectiveStats(
const SessionSnapshot& session_snapshot) {
std::pair<bool, std::string> hasCacheFile;
TF_ASSIGN_OR_RETURN(hasCacheFile, session_snapshot.HasCacheFile(
StoredDataType::DCN_COLLECTIVE_STATS));
if (!hasCacheFile.first) {
return GetDcnCollectiveStatsFromMultiXSpaceAndSaveToFile(session_snapshot);
}
if (hasCacheFile.second.empty()) {
return false;
} else {
return true;
}
}
absl::StatusOr<DcnSlackAnalysis> GetDcnSlackAnalysisByHostName(
const SessionSnapshot& session_snapshot, const std::string hostname) {
TF_ASSIGN_OR_RETURN(bool hasDcnCollectiveStats,
ConvertMultiXSpaceToDcnCollectiveStats(session_snapshot));
DcnSlackAnalysis dcnSlackAnalysis;
if (hasDcnCollectiveStats) {
TF_RETURN_IF_ERROR(ReadBinaryProto(session_snapshot,
StoredDataType::DCN_COLLECTIVE_STATS,
hostname, &dcnSlackAnalysis));
}
return dcnSlackAnalysis;
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_dcn_collective_stats.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/profiler/convert/repository.h"
#include "tensorflow/core/profiler/protobuf/dcn_slack_analysis.pb.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tsl/platform/status.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
DcnSlackAnalysis CreateDcnSlackAnalysisProto() {
DcnSlackAnalysis dcn_slack_analysis;
DcnSlackSummary* dcn_slack_summary =
dcn_slack_analysis.add_dcn_slack_summary();
dcn_slack_summary->set_rendezvous("collective");
dcn_slack_summary->set_recv_op_name("recv-done");
dcn_slack_summary->set_send_op_name("send");
dcn_slack_summary->set_slack_us(2);
dcn_slack_summary->set_observed_duration_us(12);
dcn_slack_summary->set_stall_duration_us(5);
dcn_slack_summary->set_occurrences(4);
dcn_slack_summary->set_bytes_transmitted_over_network(819200);
return dcn_slack_analysis;
}
SessionSnapshot CreateSessionSnapshot(bool create_cache_file,
bool has_dcn_collective_stats) {
std::string test_name =
::testing::UnitTest::GetInstance()->current_test_info()->name();
  std::string path = absl::StrCat("ram://", test_name, "/");
std::unique_ptr<WritableFile> xplane_file;
std::vector<std::string> paths = {absl::StrCat(path, "hostname.xplane.pb")};
auto xspace = std::make_unique<XSpace>();
XPlane* xplane = FindOrAddMutablePlaneWithName(xspace.get(), "/host:CPU");
if (has_dcn_collective_stats) {
XPlaneBuilder xplane_builder(xplane);
xplane_builder.GetOrCreateEventMetadata("MegaScale:");
}
if (create_cache_file) {
if (has_dcn_collective_stats) {
tensorflow::Env::Default()
->NewAppendableFile(
absl::StrCat(path, "hostname.dcn_collective_stats.pb"),
&xplane_file)
.IgnoreError();
tensorflow::Env::Default()
->NewAppendableFile(
absl::StrCat(path, "ALL_HOSTS.dcn_collective_stats.pb"),
&xplane_file)
.IgnoreError();
} else {
tensorflow::Env::Default()
->NewAppendableFile(
absl::StrCat(path, "NO_HOST.dcn_collective_stats.pb"),
&xplane_file)
.IgnoreError();
}
}
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(xspace));
absl::StatusOr<SessionSnapshot> session_snapshot_status =
SessionSnapshot::Create(paths, std::move(xspaces));
TF_CHECK_OK(session_snapshot_status.status());
SessionSnapshot session_snapshot = std::move(session_snapshot_status.value());
if (has_dcn_collective_stats) {
DcnSlackAnalysis dcn_slack_analysis = CreateDcnSlackAnalysisProto();
TF_CHECK_OK(session_snapshot.WriteBinaryProto(
DCN_COLLECTIVE_STATS, "hostname", dcn_slack_analysis));
TF_CHECK_OK(session_snapshot.WriteBinaryProto(
DCN_COLLECTIVE_STATS, kAllHostsIdentifier, dcn_slack_analysis));
}
return session_snapshot;
}
TEST(ConvertXplaneToDcnCollectiveStats,
HasAllHostsDcnCollectiveStatsCacheFile) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(true, true);
absl::StatusOr<bool> status =
HasDcnCollectiveStatsInMultiXSpace(session_snapshot);
EXPECT_EQ(status.value(), true);
}
TEST(ConvertXplaneToDcnCollectiveStats, HasNoHostDcnCollectiveStatsCacheFile) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(true, false);
absl::StatusOr<bool> status =
HasDcnCollectiveStatsInMultiXSpace(session_snapshot);
EXPECT_EQ(status.value(), false);
}
TEST(ConvertXplaneToDcnCollectiveStats,
NoCacheFileButTraceHasDcnCollectiveStats) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(false, true);
absl::StatusOr<bool> status =
HasDcnCollectiveStatsInMultiXSpace(session_snapshot);
EXPECT_EQ(status.value(), true);
}
TEST(ConvertXplaneToDcnCollectiveStats,
NoCacheFileNoDcnCollectiveStatsPresent) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(false, false);
absl::StatusOr<bool> status =
HasDcnCollectiveStatsInMultiXSpace(session_snapshot);
EXPECT_EQ(status.value(), false);
}
TEST(ConvertXplaneToDcnCollectiveStats,
ConvertXSpaceToDcnCollectiveStatsWhenStatsPresent) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(false, true);
absl::StatusOr<bool> status =
ConvertMultiXSpaceToDcnCollectiveStats(session_snapshot);
absl::StatusOr<std::optional<std::string>> all_hosts_filepath =
session_snapshot.GetHostDataFilePath(StoredDataType::DCN_COLLECTIVE_STATS,
kAllHostsIdentifier);
absl::StatusOr<std::optional<std::string>> host_filepath =
session_snapshot.GetHostDataFilePath(StoredDataType::DCN_COLLECTIVE_STATS,
"hostname");
EXPECT_EQ(status.value(), true);
TF_EXPECT_OK(all_hosts_filepath.status());
EXPECT_TRUE(all_hosts_filepath.value().has_value());
EXPECT_FALSE(all_hosts_filepath.value().value().empty());
TF_EXPECT_OK(host_filepath.status());
EXPECT_TRUE(host_filepath.value().has_value());
EXPECT_FALSE(host_filepath.value().value().empty());
}
TEST(ConvertXplaneToDcnCollectiveStats,
ConvertXSpaceToDcnCollectiveStatsWhenStatsNotPresent) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(false, false);
absl::StatusOr<bool> status =
ConvertMultiXSpaceToDcnCollectiveStats(session_snapshot);
absl::StatusOr<std::optional<std::string>> filepath =
session_snapshot.GetHostDataFilePath(StoredDataType::DCN_COLLECTIVE_STATS,
kNoHostIdentifier);
EXPECT_EQ(status.value(), false);
TF_EXPECT_OK(filepath.status());
EXPECT_TRUE(filepath.value().has_value());
EXPECT_FALSE(filepath.value().value().empty());
}
TEST(ConvertXplaneToDcnCollectiveStats,
GetHostDcnSlackAnalysisWhenStatsNotPresent) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(false, false);
absl::StatusOr<DcnSlackAnalysis> host_dcn_slack_analysis =
GetDcnSlackAnalysisByHostName(session_snapshot, "hostname");
TF_EXPECT_OK(host_dcn_slack_analysis.status());
EXPECT_EQ(host_dcn_slack_analysis.value().dcn_slack_summary_size(), 0);
}
TEST(ConvertXplaneToDcnCollectiveStats,
GetHostDcnSlackAnalysisWhenStatsPresent) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(true, true);
absl::StatusOr<DcnSlackAnalysis> host_dcn_slack_analysis =
GetDcnSlackAnalysisByHostName(session_snapshot, "hostname");
TF_EXPECT_OK(host_dcn_slack_analysis.status());
EXPECT_EQ(host_dcn_slack_analysis.value().dcn_slack_summary_size(), 1);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_dcn_collective_stats.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_dcn_collective_stats_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4645f1f3-7545-471e-aacc-a06c1b1d4adb | cpp | google/quiche | quic_ping_manager | quiche/quic/core/quic_ping_manager.cc | quiche/quic/core/quic_ping_manager_test.cc | #include "quiche/quic/core/quic_ping_manager.h"
#include <algorithm>
#include "quiche/quic/platform/api/quic_flags.h"
namespace quic {
namespace {
const int kMaxRetransmittableOnWireDelayShift = 10;
}
QuicPingManager::QuicPingManager(Perspective perspective, Delegate* delegate,
QuicAlarm* alarm)
: perspective_(perspective), delegate_(delegate), alarm_(*alarm) {}
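// Arms the single underlying alarm for the earlier of the keep-alive and
// retransmittable-on-wire deadlines; the keep-alive deadline tolerates a
// one-second granularity, the on-wire deadline uses kAlarmGranularity.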
void QuicPingManager::SetAlarm(QuicTime now, bool should_keep_alive,
bool has_in_flight_packets) {
UpdateDeadlines(now, should_keep_alive, has_in_flight_packets);
const QuicTime earliest_deadline = GetEarliestDeadline();
if (!earliest_deadline.IsInitialized()) {
alarm_.Cancel();
return;
}
if (earliest_deadline == keep_alive_deadline_) {
alarm_.Update(earliest_deadline, QuicTime::Delta::FromSeconds(1));
return;
}
alarm_.Update(earliest_deadline, kAlarmGranularity);
}
void QuicPingManager::OnAlarm() {
const QuicTime earliest_deadline = GetEarliestDeadline();
if (!earliest_deadline.IsInitialized()) {
QUIC_BUG(quic_ping_manager_alarm_fires_unexpectedly)
<< "QuicPingManager alarm fires unexpectedly.";
return;
}
if (earliest_deadline == retransmittable_on_wire_deadline_) {
retransmittable_on_wire_deadline_ = QuicTime::Zero();
if (GetQuicFlag(quic_max_aggressive_retransmittable_on_wire_ping_count) !=
0) {
++consecutive_retransmittable_on_wire_count_;
}
++retransmittable_on_wire_count_;
delegate_->OnRetransmittableOnWireTimeout();
return;
}
if (earliest_deadline == keep_alive_deadline_) {
keep_alive_deadline_ = QuicTime::Zero();
delegate_->OnKeepAliveTimeout();
}
}
void QuicPingManager::Stop() {
alarm_.PermanentCancel();
retransmittable_on_wire_deadline_ = QuicTime::Zero();
keep_alive_deadline_ = QuicTime::Zero();
}
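// Recomputes the keep-alive and retransmittable-on-wire deadlines. Once the
// aggressive ping budget is spent, the on-wire timeout doubles per
// consecutive ping, with the shift capped by
// kMaxRetransmittableOnWireDelayShift.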
void QuicPingManager::UpdateDeadlines(QuicTime now, bool should_keep_alive,
bool has_in_flight_packets) {
keep_alive_deadline_ = QuicTime::Zero();
if (perspective_ == Perspective::IS_SERVER &&
initial_retransmittable_on_wire_timeout_.IsInfinite()) {
QUICHE_DCHECK(!retransmittable_on_wire_deadline_.IsInitialized());
return;
}
if (!should_keep_alive) {
retransmittable_on_wire_deadline_ = QuicTime::Zero();
return;
}
if (perspective_ == Perspective::IS_CLIENT) {
keep_alive_deadline_ = now + keep_alive_timeout_;
}
if (initial_retransmittable_on_wire_timeout_.IsInfinite() ||
has_in_flight_packets ||
retransmittable_on_wire_count_ >
GetQuicFlag(quic_max_retransmittable_on_wire_ping_count)) {
retransmittable_on_wire_deadline_ = QuicTime::Zero();
return;
}
QUICHE_DCHECK_LT(initial_retransmittable_on_wire_timeout_,
keep_alive_timeout_);
QuicTime::Delta retransmittable_on_wire_timeout =
initial_retransmittable_on_wire_timeout_;
const int max_aggressive_retransmittable_on_wire_count =
GetQuicFlag(quic_max_aggressive_retransmittable_on_wire_ping_count);
QUICHE_DCHECK_LE(0, max_aggressive_retransmittable_on_wire_count);
if (consecutive_retransmittable_on_wire_count_ >
max_aggressive_retransmittable_on_wire_count) {
int shift = std::min(consecutive_retransmittable_on_wire_count_ -
max_aggressive_retransmittable_on_wire_count,
kMaxRetransmittableOnWireDelayShift);
retransmittable_on_wire_timeout =
initial_retransmittable_on_wire_timeout_ * (1 << shift);
}
if (retransmittable_on_wire_deadline_.IsInitialized() &&
retransmittable_on_wire_deadline_ <
now + retransmittable_on_wire_timeout) {
return;
}
retransmittable_on_wire_deadline_ = now + retransmittable_on_wire_timeout;
}
QuicTime QuicPingManager::GetEarliestDeadline() const {
QuicTime earliest_deadline = QuicTime::Zero();
for (QuicTime t : {retransmittable_on_wire_deadline_, keep_alive_deadline_}) {
if (!t.IsInitialized()) {
continue;
}
if (!earliest_deadline.IsInitialized() || t < earliest_deadline) {
earliest_deadline = t;
}
}
return earliest_deadline;
}
} | #include "quiche/quic/core/quic_ping_manager.h"
#include "quiche/quic/core/quic_connection_alarms.h"
#include "quiche/quic/core/quic_one_block_arena.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/mock_quic_connection_alarms.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
namespace quic {
namespace test {
class QuicPingManagerPeer {
public:
static QuicAlarm* GetAlarm(QuicPingManager* manager) {
return &manager->alarm_;
}
static void SetPerspective(QuicPingManager* manager,
Perspective perspective) {
manager->perspective_ = perspective;
}
};
namespace {
const bool kShouldKeepAlive = true;
const bool kHasInflightPackets = true;
class MockDelegate : public QuicPingManager::Delegate {
public:
MOCK_METHOD(void, OnKeepAliveTimeout, (), (override));
MOCK_METHOD(void, OnRetransmittableOnWireTimeout, (), (override));
};
class QuicPingManagerTest : public QuicTest {
public:
QuicPingManagerTest()
: alarms_(&connection_alarms_delegate_, alarm_factory_, arena_),
manager_(Perspective::IS_CLIENT, &delegate_, &alarms_.ping_alarm()),
alarm_(static_cast<MockAlarmFactory::TestAlarm*>(
QuicPingManagerPeer::GetAlarm(&manager_))) {
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(1));
ON_CALL(connection_alarms_delegate_, OnPingAlarm()).WillByDefault([&] {
manager_.OnAlarm();
});
}
protected:
testing::StrictMock<MockDelegate> delegate_;
MockConnectionAlarmsDelegate connection_alarms_delegate_;
MockClock clock_;
QuicConnectionArena arena_;
MockAlarmFactory alarm_factory_;
QuicConnectionAlarms alarms_;
QuicPingManager manager_;
MockAlarmFactory::TestAlarm* alarm_;
};
TEST_F(QuicPingManagerTest, KeepAliveTimeout) {
EXPECT_FALSE(alarm_->IsSet());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(QuicTime::Delta::FromSeconds(kPingTimeoutSecs),
alarm_->deadline() - clock_.ApproximateNow());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
!kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(QuicTime::Delta::FromSeconds(kPingTimeoutSecs) -
QuicTime::Delta::FromMilliseconds(5),
alarm_->deadline() - clock_.ApproximateNow());
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(kPingTimeoutSecs));
EXPECT_CALL(delegate_, OnKeepAliveTimeout());
alarm_->Fire();
EXPECT_FALSE(alarm_->IsSet());
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
manager_.SetAlarm(clock_.ApproximateNow(), !kShouldKeepAlive,
kHasInflightPackets);
EXPECT_FALSE(alarm_->IsSet());
}
TEST_F(QuicPingManagerTest, CustomizedKeepAliveTimeout) {
EXPECT_FALSE(alarm_->IsSet());
manager_.set_keep_alive_timeout(QuicTime::Delta::FromSeconds(10));
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(QuicTime::Delta::FromSeconds(10),
alarm_->deadline() - clock_.ApproximateNow());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
!kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(
QuicTime::Delta::FromSeconds(10) - QuicTime::Delta::FromMilliseconds(5),
alarm_->deadline() - clock_.ApproximateNow());
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(10));
EXPECT_CALL(delegate_, OnKeepAliveTimeout());
alarm_->Fire();
EXPECT_FALSE(alarm_->IsSet());
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
manager_.SetAlarm(clock_.ApproximateNow(), !kShouldKeepAlive,
kHasInflightPackets);
EXPECT_FALSE(alarm_->IsSet());
}
TEST_F(QuicPingManagerTest, RetransmittableOnWireTimeout) {
const QuicTime::Delta kRtransmittableOnWireTimeout =
QuicTime::Delta::FromMilliseconds(50);
manager_.set_initial_retransmittable_on_wire_timeout(
kRtransmittableOnWireTimeout);
EXPECT_FALSE(alarm_->IsSet());
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(QuicTime::Delta::FromSeconds(kPingTimeoutSecs),
alarm_->deadline() - clock_.ApproximateNow());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
!kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(kRtransmittableOnWireTimeout,
alarm_->deadline() - clock_.ApproximateNow());
clock_.AdvanceTime(kRtransmittableOnWireTimeout);
EXPECT_CALL(delegate_, OnRetransmittableOnWireTimeout());
alarm_->Fire();
EXPECT_FALSE(alarm_->IsSet());
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
kHasInflightPackets);
ASSERT_TRUE(alarm_->IsSet());
EXPECT_EQ(QuicTime::Delta::FromSeconds(kPingTimeoutSecs),
alarm_->deadline() - clock_.ApproximateNow());
}
TEST_F(QuicPingManagerTest, RetransmittableOnWireTimeoutExponentiallyBackOff) {
const int kMaxAggressiveRetransmittableOnWireCount = 5;
SetQuicFlag(quic_max_aggressive_retransmittable_on_wire_ping_count,
kMaxAggressiveRetransmittableOnWireCount);
const QuicTime::Delta initial_retransmittable_on_wire_timeout =
QuicTime::Delta::FromMilliseconds(200);
manager_.set_initial_retransmittable_on_wire_timeout(
initial_retransmittable_on_wire_timeout);
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
EXPECT_FALSE(alarm_->IsSet());
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(QuicTime::Delta::FromSeconds(kPingTimeoutSecs),
alarm_->deadline() - clock_.ApproximateNow());
for (int i = 0; i <= kMaxAggressiveRetransmittableOnWireCount; ++i) {
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
!kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(initial_retransmittable_on_wire_timeout,
alarm_->deadline() - clock_.ApproximateNow());
clock_.AdvanceTime(initial_retransmittable_on_wire_timeout);
EXPECT_CALL(delegate_, OnRetransmittableOnWireTimeout());
alarm_->Fire();
EXPECT_FALSE(alarm_->IsSet());
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
kHasInflightPackets);
}
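  // Once the aggressive ping limit is exhausted, each successive
  // retransmittable-on-wire timeout doubles until it reaches the
  // keep-alive timeout.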
QuicTime::Delta retransmittable_on_wire_timeout =
initial_retransmittable_on_wire_timeout;
while (retransmittable_on_wire_timeout * 2 <
QuicTime::Delta::FromSeconds(kPingTimeoutSecs)) {
retransmittable_on_wire_timeout = retransmittable_on_wire_timeout * 2;
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
!kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(retransmittable_on_wire_timeout,
alarm_->deadline() - clock_.ApproximateNow());
clock_.AdvanceTime(retransmittable_on_wire_timeout);
EXPECT_CALL(delegate_, OnRetransmittableOnWireTimeout());
alarm_->Fire();
EXPECT_FALSE(alarm_->IsSet());
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
kHasInflightPackets);
}
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(QuicTime::Delta::FromSeconds(kPingTimeoutSecs),
alarm_->deadline() - clock_.ApproximateNow());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
!kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(QuicTime::Delta::FromSeconds(kPingTimeoutSecs) -
QuicTime::Delta::FromMilliseconds(5),
alarm_->deadline() - clock_.ApproximateNow());
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(kPingTimeoutSecs) -
QuicTime::Delta::FromMilliseconds(5));
EXPECT_CALL(delegate_, OnKeepAliveTimeout());
alarm_->Fire();
EXPECT_FALSE(alarm_->IsSet());
}
TEST_F(QuicPingManagerTest,
ResetRetransmitableOnWireTimeoutExponentiallyBackOff) {
const int kMaxAggressiveRetransmittableOnWireCount = 3;
SetQuicFlag(quic_max_aggressive_retransmittable_on_wire_ping_count,
kMaxAggressiveRetransmittableOnWireCount);
const QuicTime::Delta initial_retransmittable_on_wire_timeout =
QuicTime::Delta::FromMilliseconds(200);
manager_.set_initial_retransmittable_on_wire_timeout(
initial_retransmittable_on_wire_timeout);
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
EXPECT_FALSE(alarm_->IsSet());
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(QuicTime::Delta::FromSeconds(kPingTimeoutSecs),
alarm_->deadline() - clock_.ApproximateNow());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
!kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(initial_retransmittable_on_wire_timeout,
alarm_->deadline() - clock_.ApproximateNow());
EXPECT_CALL(delegate_, OnRetransmittableOnWireTimeout());
clock_.AdvanceTime(initial_retransmittable_on_wire_timeout);
alarm_->Fire();
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
!kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(initial_retransmittable_on_wire_timeout,
alarm_->deadline() - clock_.ApproximateNow());
manager_.reset_consecutive_retransmittable_on_wire_count();
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
!kHasInflightPackets);
EXPECT_EQ(initial_retransmittable_on_wire_timeout,
alarm_->deadline() - clock_.ApproximateNow());
EXPECT_CALL(delegate_, OnRetransmittableOnWireTimeout());
clock_.AdvanceTime(initial_retransmittable_on_wire_timeout);
alarm_->Fire();
for (int i = 0; i < kMaxAggressiveRetransmittableOnWireCount; i++) {
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
!kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(initial_retransmittable_on_wire_timeout,
alarm_->deadline() - clock_.ApproximateNow());
clock_.AdvanceTime(initial_retransmittable_on_wire_timeout);
EXPECT_CALL(delegate_, OnRetransmittableOnWireTimeout());
alarm_->Fire();
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
kHasInflightPackets);
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
}
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
!kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(initial_retransmittable_on_wire_timeout * 2,
alarm_->deadline() - clock_.ApproximateNow());
clock_.AdvanceTime(2 * initial_retransmittable_on_wire_timeout);
EXPECT_CALL(delegate_, OnRetransmittableOnWireTimeout());
alarm_->Fire();
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
manager_.reset_consecutive_retransmittable_on_wire_count();
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
!kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(initial_retransmittable_on_wire_timeout,
alarm_->deadline() - clock_.ApproximateNow());
}
TEST_F(QuicPingManagerTest, RetransmittableOnWireLimit) {
static constexpr int kMaxRetransmittableOnWirePingCount = 3;
SetQuicFlag(quic_max_retransmittable_on_wire_ping_count,
kMaxRetransmittableOnWirePingCount);
static constexpr QuicTime::Delta initial_retransmittable_on_wire_timeout =
QuicTime::Delta::FromMilliseconds(200);
static constexpr QuicTime::Delta kShortDelay =
QuicTime::Delta::FromMilliseconds(5);
ASSERT_LT(kShortDelay * 10, initial_retransmittable_on_wire_timeout);
manager_.set_initial_retransmittable_on_wire_timeout(
initial_retransmittable_on_wire_timeout);
clock_.AdvanceTime(kShortDelay);
EXPECT_FALSE(alarm_->IsSet());
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(QuicTime::Delta::FromSeconds(kPingTimeoutSecs),
alarm_->deadline() - clock_.ApproximateNow());
for (int i = 0; i <= kMaxRetransmittableOnWirePingCount; i++) {
clock_.AdvanceTime(kShortDelay);
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
!kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(initial_retransmittable_on_wire_timeout,
alarm_->deadline() - clock_.ApproximateNow());
clock_.AdvanceTime(initial_retransmittable_on_wire_timeout);
EXPECT_CALL(delegate_, OnRetransmittableOnWireTimeout());
alarm_->Fire();
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
kHasInflightPackets);
}
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
!kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(QuicTime::Delta::FromSeconds(kPingTimeoutSecs),
alarm_->deadline() - clock_.ApproximateNow());
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(kPingTimeoutSecs));
EXPECT_CALL(delegate_, OnKeepAliveTimeout());
alarm_->Fire();
EXPECT_FALSE(alarm_->IsSet());
}
TEST_F(QuicPingManagerTest, MaxRetransmittableOnWireDelayShift) {
QuicPingManagerPeer::SetPerspective(&manager_, Perspective::IS_SERVER);
const int kMaxAggressiveRetransmittableOnWireCount = 3;
SetQuicFlag(quic_max_aggressive_retransmittable_on_wire_ping_count,
kMaxAggressiveRetransmittableOnWireCount);
const QuicTime::Delta initial_retransmittable_on_wire_timeout =
QuicTime::Delta::FromMilliseconds(200);
manager_.set_initial_retransmittable_on_wire_timeout(
initial_retransmittable_on_wire_timeout);
for (int i = 0; i <= kMaxAggressiveRetransmittableOnWireCount; i++) {
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
!kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(initial_retransmittable_on_wire_timeout,
alarm_->deadline() - clock_.ApproximateNow());
clock_.AdvanceTime(initial_retransmittable_on_wire_timeout);
EXPECT_CALL(delegate_, OnRetransmittableOnWireTimeout());
alarm_->Fire();
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
kHasInflightPackets);
}
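  // Subsequent timeouts back off exponentially, with the shift capped at 10
  // (i.e. 1024x the initial timeout).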
for (int i = 1; i <= 20; ++i) {
manager_.SetAlarm(clock_.ApproximateNow(), kShouldKeepAlive,
!kHasInflightPackets);
EXPECT_TRUE(alarm_->IsSet());
if (i <= 10) {
EXPECT_EQ(initial_retransmittable_on_wire_timeout * (1 << i),
alarm_->deadline() - clock_.ApproximateNow());
} else {
EXPECT_EQ(initial_retransmittable_on_wire_timeout * (1 << 10),
alarm_->deadline() - clock_.ApproximateNow());
}
clock_.AdvanceTime(alarm_->deadline() - clock_.ApproximateNow());
EXPECT_CALL(delegate_, OnRetransmittableOnWireTimeout());
alarm_->Fire();
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_ping_manager.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_ping_manager_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
28bbb614-9b57-4c27-aa28-be37452264de | cpp | google/tensorstore | curl_transport | tensorstore/internal/http/curl_transport.cc | tensorstore/internal/http/curl_transport_test.cc | #include "tensorstore/internal/http/curl_transport.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/flags/flag.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <curl/curl.h>
#include "tensorstore/internal/container/circular_queue.h"
#include "tensorstore/internal/cord_util.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/curl_factory.h"
#include "tensorstore/internal/http/curl_handle.h"
#include "tensorstore/internal/http/curl_wrappers.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/gauge.h"
#include "tensorstore/internal/metrics/histogram.h"
#include "tensorstore/internal/metrics/metadata.h"
#include "tensorstore/internal/thread/thread.h"
ABSL_FLAG(std::optional<uint32_t>, tensorstore_http_threads, std::nullopt,
"Threads to use for http requests. "
"Overrides TENSORSTORE_HTTP_THREADS.");
using ::tensorstore::internal::GetFlagOrEnvValue;
using ::tensorstore::internal_container::CircularQueue;
using ::tensorstore::internal_metrics::MetricMetadata;
namespace tensorstore {
namespace internal_http {
namespace {
auto& http_request_started = internal_metrics::Counter<int64_t>::New(
"/tensorstore/http/request_started",
MetricMetadata("HTTP requests started"));
auto& http_request_completed = internal_metrics::Counter<int64_t>::New(
"/tensorstore/http/request_completed",
MetricMetadata("HTTP requests completed"));
auto& http_request_bytes =
internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
"/tensorstore/http/request_bytes",
MetricMetadata("HTTP request bytes transmitted",
internal_metrics::Units::kBytes));
auto& http_request_header_bytes =
internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
"/tensorstore/http/request_header_bytes",
MetricMetadata("HTTP request bytes transmitted",
internal_metrics::Units::kBytes));
auto& http_response_codes = internal_metrics::Counter<int64_t, int>::New(
"/tensorstore/http/response_codes", "code",
MetricMetadata("HTTP response status code counts"));
auto& http_response_bytes =
internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
"/tensorstore/http/response_bytes",
MetricMetadata("HTTP response bytes received",
internal_metrics::Units::kBytes));
auto& http_active = internal_metrics::Gauge<int64_t>::New(
"/tensorstore/http/active",
MetricMetadata("HTTP requests considered active"));
auto& http_total_time_ms =
internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
"/tensorstore/http/total_time_ms",
MetricMetadata("HTTP total latency (ms)",
internal_metrics::Units::kMilliseconds));
auto& http_first_byte_latency_us =
internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
"/tensorstore/http/first_byte_latency_us",
MetricMetadata("HTTP first byte received latency (us)",
internal_metrics::Units::kMicroseconds));
auto& http_poll_time_ns =
internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
"/tensorstore/http/http_poll_time_ns",
MetricMetadata("HTTP time spent in curl_multi_poll (ns)",
internal_metrics::Units::kNanoseconds));
uint32_t GetHttpThreads() {
return std::max(1u, GetFlagOrEnvValue(FLAGS_tensorstore_http_threads,
"TENSORSTORE_HTTP_THREADS")
.value_or(4u));
}
struct CurlRequestState {
std::shared_ptr<CurlHandleFactory> factory_;
CurlHandle handle_;
CurlHeaders headers_;
absl::Cord payload_;
absl::Cord::CharIterator payload_it_;
size_t payload_remaining_;
HttpResponseHandler* response_handler_ = nullptr;
size_t response_payload_size_ = 0;
bool status_set = false;
char error_buffer_[CURL_ERROR_SIZE];
CurlRequestState(std::shared_ptr<CurlHandleFactory> factory)
: factory_(std::move(factory)), handle_(CurlHandle::Create(*factory_)) {
error_buffer_[0] = 0;
handle_.SetOption(CURLOPT_ERRORBUFFER, error_buffer_);
handle_.SetOption(CURLOPT_BUFFERSIZE, 512 * 1024);
handle_.SetOption(CURLOPT_TCP_NODELAY, 1L);
handle_.SetOption(CURLOPT_WRITEDATA, this);
handle_.SetOption(CURLOPT_WRITEFUNCTION,
&CurlRequestState::CurlWriteCallback);
handle_.SetOption(CURLOPT_HEADERDATA, this);
handle_.SetOption(CURLOPT_HEADERFUNCTION,
&CurlRequestState::CurlHeaderCallback);
}
~CurlRequestState() {
handle_.SetOption(CURLOPT_WRITEDATA, nullptr);
handle_.SetOption(CURLOPT_WRITEFUNCTION, nullptr);
handle_.SetOption(CURLOPT_READDATA, nullptr);
handle_.SetOption(CURLOPT_READFUNCTION, nullptr);
handle_.SetOption(CURLOPT_SEEKDATA, nullptr);
handle_.SetOption(CURLOPT_SEEKFUNCTION, nullptr);
handle_.SetOption(CURLOPT_HEADERDATA, nullptr);
handle_.SetOption(CURLOPT_HEADERFUNCTION, nullptr);
handle_.SetOption(CURLOPT_ERRORBUFFER, nullptr);
CurlHandle::Cleanup(*factory_, std::move(handle_));
}
void Prepare(const HttpRequest& request, IssueRequestOptions options) {
handle_.SetOption(CURLOPT_URL, request.url.c_str());
std::string user_agent = request.user_agent + GetCurlUserAgentSuffix();
handle_.SetOption(CURLOPT_USERAGENT, user_agent.c_str());
curl_slist* head = nullptr;
size_t header_bytes_ = 0;
for (const std::string& h : request.headers) {
head = curl_slist_append(head, h.c_str());
header_bytes_ += h.size();
}
headers_.reset(head);
handle_.SetOption(CURLOPT_HTTPHEADER, headers_.get());
if (request.accept_encoding) {
handle_.SetOption(CURLOPT_ACCEPT_ENCODING, "");
}
if (options.request_timeout > absl::ZeroDuration()) {
auto ms = absl::ToInt64Milliseconds(options.request_timeout);
handle_.SetOption(CURLOPT_TIMEOUT_MS, ms > 0 ? ms : 1);
}
if (options.connect_timeout > absl::ZeroDuration()) {
auto ms = absl::ToInt64Milliseconds(options.connect_timeout);
handle_.SetOption(CURLOPT_CONNECTTIMEOUT_MS, ms > 0 ? ms : 1);
}
payload_ = std::move(options.payload);
payload_remaining_ = payload_.size();
if (payload_remaining_ > 0) {
payload_it_ = payload_.char_begin();
handle_.SetOption(CURLOPT_READDATA, this);
handle_.SetOption(CURLOPT_READFUNCTION,
&CurlRequestState::CurlReadCallback);
handle_.SetOption(CURLOPT_SEEKDATA, this);
handle_.SetOption(CURLOPT_SEEKFUNCTION,
&CurlRequestState::CurlSeekCallback);
}
if (request.method == "GET") {
handle_.SetOption(CURLOPT_PIPEWAIT, 1L);
handle_.SetOption(CURLOPT_HTTPGET, 1L);
} else if (request.method == "HEAD") {
handle_.SetOption(CURLOPT_NOBODY, 1L);
} else if (request.method == "PUT") {
handle_.SetOption(CURLOPT_UPLOAD, 1L);
handle_.SetOption(CURLOPT_PUT, 1L);
handle_.SetOption(CURLOPT_INFILESIZE_LARGE, payload_remaining_);
} else if (request.method == "POST") {
handle_.SetOption(CURLOPT_POST, 1L);
handle_.SetOption(CURLOPT_POSTFIELDSIZE_LARGE, payload_remaining_);
} else if (request.method == "PATCH") {
handle_.SetOption(CURLOPT_UPLOAD, 1L);
handle_.SetOption(CURLOPT_CUSTOMREQUEST, "PATCH");
handle_.SetOption(CURLOPT_POSTFIELDSIZE_LARGE, payload_remaining_);
} else {
handle_.SetOption(CURLOPT_CUSTOMREQUEST, request.method.c_str());
}
switch (options.http_version) {
case IssueRequestOptions::HttpVersion::kHttp1:
handle_.SetOption(CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_1);
break;
case IssueRequestOptions::HttpVersion::kHttp2:
handle_.SetOption(CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2_0);
break;
case IssueRequestOptions::HttpVersion::kHttp2TLS:
handle_.SetOption(CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2TLS);
break;
case IssueRequestOptions::HttpVersion::kHttp2PriorKnowledge:
handle_.SetOption(CURLOPT_HTTP_VERSION,
CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE);
break;
default:
break;
}
http_request_started.Increment();
http_request_bytes.Observe(payload_remaining_);
http_request_header_bytes.Observe(header_bytes_);
}
void SetForbidReuse() {
handle_.SetOption(CURLOPT_FORBID_REUSE, 1);
}
bool MaybeSetStatusAndProcess() {
if (status_set) return true;
auto status_code = handle_.GetResponseCode();
if (status_code < 200) return false;
response_handler_->OnStatus(status_code);
status_set = true;
return true;
}
static size_t CurlHeaderCallback(void* contents, size_t size, size_t nmemb,
void* userdata) {
auto* self = static_cast<CurlRequestState*>(userdata);
auto data =
std::string_view(static_cast<char const*>(contents), size * nmemb);
if (self->MaybeSetStatusAndProcess()) {
self->response_handler_->OnResponseHeader(data);
}
return data.size();
}
static size_t CurlWriteCallback(void* contents, size_t size, size_t nmemb,
void* userdata) {
auto* self = static_cast<CurlRequestState*>(userdata);
auto data =
std::string_view(static_cast<char const*>(contents), size * nmemb);
if (self->MaybeSetStatusAndProcess()) {
self->response_payload_size_ += data.size();
self->response_handler_->OnResponseBody(data);
}
return data.size();
}
static size_t CurlReadCallback(void* contents, size_t size, size_t nmemb,
void* userdata) {
auto* self = static_cast<CurlRequestState*>(userdata);
size_t n = std::min(size * nmemb, self->payload_remaining_);
internal::CopyCordToSpan(self->payload_it_, {static_cast<char*>(contents),
static_cast<ptrdiff_t>(n)});
self->payload_remaining_ -= n;
return n;
}
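  // Seek callback lets libcurl rewind the request body, e.g. when an upload
  // needs to be resent after a redirect or authentication challenge.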
static int CurlSeekCallback(void* userdata, curl_off_t offset, int origin) {
if (origin != SEEK_SET) {
return CURL_SEEKFUNC_CANTSEEK;
}
auto* self = static_cast<CurlRequestState*>(userdata);
if (offset < 0 || offset > self->payload_.size()) {
return CURL_SEEKFUNC_FAIL;
}
self->payload_it_ = self->payload_.char_begin();
absl::Cord::Advance(&self->payload_it_, static_cast<size_t>(offset));
self->payload_remaining_ =
self->payload_.size() - static_cast<size_t>(offset);
return CURL_SEEKFUNC_OK;
}
};
class MultiTransportImpl {
public:
MultiTransportImpl(std::shared_ptr<CurlHandleFactory> factory,
size_t nthreads);
~MultiTransportImpl();
void EnqueueRequest(const HttpRequest& request, IssueRequestOptions options,
HttpResponseHandler* response_handler);
void FinishRequest(std::unique_ptr<CurlRequestState> state, CURLcode code);
private:
struct ThreadData {
std::atomic<int64_t> count = 0;
CurlMulti multi;
absl::Mutex mutex;
CircularQueue<std::unique_ptr<CurlRequestState>> pending{16};
bool done = false;
};
void Run(ThreadData& thread_data);
void MaybeAddPendingTransfers(ThreadData& thread_data);
void RemoveCompletedTransfers(ThreadData& thread_data);
std::shared_ptr<CurlHandleFactory> factory_;
std::atomic<bool> done_{false};
std::unique_ptr<ThreadData[]> thread_data_;
std::vector<internal::Thread> threads_;
};
MultiTransportImpl::MultiTransportImpl(
std::shared_ptr<CurlHandleFactory> factory, size_t nthreads)
: factory_(std::move(factory)) {
assert(factory_);
threads_.reserve(nthreads);
thread_data_ = std::make_unique<ThreadData[]>(nthreads);
for (size_t i = 0; i < nthreads; ++i) {
thread_data_[i].multi = factory_->CreateMultiHandle();
threads_.push_back(
internal::Thread({"curl_multi_thread"},
[this, index = i] { Run(thread_data_[index]); }));
}
}
MultiTransportImpl::~MultiTransportImpl() {
done_ = true;
for (size_t i = 0; i < threads_.size(); ++i) {
auto& thread_data = thread_data_[i];
absl::MutexLock l(&thread_data.mutex);
thread_data.done = true;
curl_multi_wakeup(thread_data.multi.get());
}
for (auto& thread : threads_) {
thread.Join();
}
for (size_t i = 0; i < threads_.size(); ++i) {
factory_->CleanupMultiHandle(std::move(thread_data_[i].multi));
}
}
void MultiTransportImpl::EnqueueRequest(const HttpRequest& request,
IssueRequestOptions options,
HttpResponseHandler* response_handler) {
if (done_.load()) {
response_handler->OnFailure(
absl::InternalError("MultiTransportImpl is shutting down"));
return;
}
auto state = std::make_unique<CurlRequestState>(factory_);
state->response_handler_ = response_handler;
state->Prepare(request, std::move(options));
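  // Dispatch to the worker thread with the fewest outstanding requests
  // (a simple least-loaded heuristic).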
size_t selected_index = 0;
for (size_t i = 1; i < threads_.size(); ++i) {
if (thread_data_[i].count < thread_data_[selected_index].count) {
selected_index = i;
}
}
auto& selected = thread_data_[selected_index];
absl::MutexLock l(&selected.mutex);
selected.pending.push_back(std::move(state));
selected.count++;
curl_multi_wakeup(selected.multi.get());
}
void MultiTransportImpl::FinishRequest(std::unique_ptr<CurlRequestState> state,
CURLcode code) {
if (code == CURLE_HTTP2) {
ABSL_LOG(WARNING) << "CURLE_HTTP2 " << state->error_buffer_;
state->SetForbidReuse();
}
http_request_completed.Increment();
http_response_bytes.Observe(state->response_payload_size_);
{
curl_off_t first_byte_us = 0;
state->handle_.GetInfo(CURLINFO_STARTTRANSFER_TIME_T, &first_byte_us);
http_first_byte_latency_us.Observe(first_byte_us);
}
{
curl_off_t total_time_us = 0;
state->handle_.GetInfo(CURLINFO_TOTAL_TIME_T, &total_time_us);
http_total_time_ms.Observe(total_time_us / 1000);
}
if (code != CURLE_OK) {
state->response_handler_->OnFailure(
CurlCodeToStatus(code, state->error_buffer_));
return;
}
http_response_codes.Increment(state->handle_.GetResponseCode());
assert(state->status_set);
state->response_handler_->OnComplete();
}
void MultiTransportImpl::Run(ThreadData& thread_data) {
for (;;) {
MaybeAddPendingTransfers(thread_data);
if (thread_data.count == 0) {
absl::MutexLock l(&thread_data.mutex);
if (thread_data.done) break;
thread_data.mutex.Await(absl::Condition(
+[](ThreadData* td) { return !td->pending.empty() || td->done; },
&thread_data));
if (thread_data.done) break;
continue;
}
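    // Block in curl_multi_poll() until there is socket activity or another
    // thread calls curl_multi_wakeup().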
const int timeout_ms = std::numeric_limits<int>::max();
int numfds = 0;
errno = 0;
auto start_poll = absl::Now();
CURLMcode mcode = curl_multi_poll(thread_data.multi.get(), nullptr, 0,
timeout_ms, &numfds);
if (mcode != CURLM_OK) {
ABSL_LOG(WARNING) << CurlMCodeToStatus(mcode, "in curl_multi_poll");
}
http_poll_time_ns.Observe(
absl::ToInt64Nanoseconds(absl::Now() - start_poll));
{
int running_handles = 0;
CURLMcode mcode;
do {
mcode = curl_multi_perform(thread_data.multi.get(), &running_handles);
http_active.Set(running_handles);
} while (mcode == CURLM_CALL_MULTI_PERFORM);
if (mcode != CURLM_OK) {
ABSL_LOG(WARNING) << CurlMCodeToStatus(mcode, "in curl_multi_perform");
}
}
RemoveCompletedTransfers(thread_data);
}
assert(thread_data.count == 0);
}
void MultiTransportImpl::MaybeAddPendingTransfers(ThreadData& thread_data) {
absl::MutexLock l(&thread_data.mutex);
while (!thread_data.pending.empty()) {
std::unique_ptr<CurlRequestState> state =
std::move(thread_data.pending.front());
thread_data.pending.pop_front();
assert(state != nullptr);
state->handle_.SetOption(CURLOPT_PRIVATE, state.get());
CURL* e = state->handle_.get();
CURLMcode mcode = curl_multi_add_handle(thread_data.multi.get(), e);
if (mcode == CURLM_OK) {
state.release();
} else {
thread_data.count--;
state->handle_.SetOption(CURLOPT_PRIVATE, nullptr);
state->response_handler_->OnFailure(
CurlMCodeToStatus(mcode, "in curl_multi_add_handle"));
}
};
}
void MultiTransportImpl::RemoveCompletedTransfers(ThreadData& thread_data) {
CURLMsg* m = nullptr;
do {
int messages_in_queue;
m = curl_multi_info_read(thread_data.multi.get(), &messages_in_queue);
if (m && m->msg == CURLMSG_DONE) {
CURLcode result = m->data.result;
CURL* e = m->easy_handle;
curl_multi_remove_handle(thread_data.multi.get(), e);
thread_data.count--;
CurlRequestState* pvt = nullptr;
curl_easy_getinfo(e, CURLINFO_PRIVATE, &pvt);
assert(pvt);
std::unique_ptr<CurlRequestState> state(pvt);
state->handle_.SetOption(CURLOPT_PRIVATE, nullptr);
FinishRequest(std::move(state), result);
}
} while (m != nullptr);
}
}
class CurlTransport::Impl : public MultiTransportImpl {
public:
using MultiTransportImpl::MultiTransportImpl;
};
CurlTransport::CurlTransport(std::shared_ptr<CurlHandleFactory> factory)
: impl_(std::make_unique<Impl>(std::move(factory),
GetHttpThreads())) {}
CurlTransport::~CurlTransport() = default;
void CurlTransport::IssueRequestWithHandler(
const HttpRequest& request, IssueRequestOptions options,
HttpResponseHandler* response_handler) {
assert(impl_);
impl_->EnqueueRequest(request, std::move(options), response_handler);
}
namespace {
struct GlobalTransport {
std::shared_ptr<HttpTransport> transport_;
std::shared_ptr<HttpTransport> Get() {
if (!transport_) {
transport_ =
std::make_shared<CurlTransport>(GetDefaultCurlHandleFactory());
}
return transport_;
}
void Set(std::shared_ptr<HttpTransport> transport) {
transport_ = std::move(transport);
}
};
ABSL_CONST_INIT absl::Mutex global_mu(absl::kConstInit);
static GlobalTransport& GetGlobalTransport() {
static auto* g = new GlobalTransport();
return *g;
}
}
std::shared_ptr<HttpTransport> GetDefaultHttpTransport() {
absl::MutexLock l(&global_mu);
return GetGlobalTransport().Get();
}
void SetDefaultHttpTransport(std::shared_ptr<HttpTransport> t) {
absl::MutexLock l(&global_mu);
return GetGlobalTransport().Set(std::move(t));
}
}
} | #ifdef _WIN32
#undef UNICODE
#define WIN32_LEAN_AND_MEAN
#endif
#include "tensorstore/internal/http/curl_transport.h"
#include <optional>
#include <string>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/http/transport_test_utils.h"
#include "tensorstore/internal/thread/thread.h"
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::tensorstore::internal_http::IssueRequestOptions;
using ::tensorstore::transport_test_utils::AcceptNonBlocking;
using ::tensorstore::transport_test_utils::AssertSend;
using ::tensorstore::transport_test_utils::CloseSocket;
using ::tensorstore::transport_test_utils::CreateBoundSocket;
using ::tensorstore::transport_test_utils::FormatSocketAddress;
using ::tensorstore::transport_test_utils::ReceiveAvailable;
using ::tensorstore::transport_test_utils::socket_t;
using ::testing::HasSubstr;
namespace {
class CurlTransportTest : public ::testing::Test {
public:
};
TEST_F(CurlTransportTest, Http1) {
auto transport = ::tensorstore::internal_http::GetDefaultHttpTransport();
auto socket = CreateBoundSocket();
ABSL_CHECK(socket.has_value());
auto hostport = FormatSocketAddress(*socket);
ABSL_CHECK(!hostport.empty());
static constexpr char kResponse[] =
"HTTP/1.1 200 OK\r\n"
"Content-Type: text/html\r\n"
"\r\n"
"<html>\n<body>\n<h1>Hello, World!</h1>\n</body>\n</html>\n";
std::string initial_request;
tensorstore::internal::Thread serve_thread({"serve_thread"}, [&] {
auto client_fd = AcceptNonBlocking(*socket);
ABSL_CHECK(client_fd.has_value());
initial_request = ReceiveAvailable(*client_fd);
AssertSend(*client_fd, kResponse);
CloseSocket(*client_fd);
});
auto response = transport->IssueRequest(
HttpRequestBuilder("POST", absl::StrCat("http:
.AddHeader("X-foo: bar")
.AddQueryParameter("name", "dragon")
.AddQueryParameter("age", "1234")
.EnableAcceptEncoding()
.BuildRequest(),
IssueRequestOptions(absl::Cord("Hello")));
ABSL_LOG(INFO) << response.status();
ABSL_LOG(INFO) << "Wait on server";
serve_thread.Join();
CloseSocket(*socket);
EXPECT_THAT(initial_request, HasSubstr("POST /?name=dragon&age=1234"));
EXPECT_THAT(initial_request,
HasSubstr(absl::StrCat("Host: ", hostport, "\r\n")));
EXPECT_THAT(initial_request, HasSubstr("Accept: **\r\n"));
EXPECT_THAT(request, HasSubstr("X-foo: bar\r\n"));
EXPECT_THAT(request, HasSubstr("Content-Length: 5"));
EXPECT_THAT(
request,
HasSubstr("Content-Type: application/x-www-form-urlencoded\r\n"));
EXPECT_THAT(request, HasSubstr("Hello"));
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/http/curl_transport.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/http/curl_transport_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
1ee979de-8d4e-44bc-bbbc-6a20fdeda327 | cpp | tensorflow/tensorflow | case_format | tensorflow/c/experimental/ops/gen/common/case_format.cc | tensorflow/c/experimental/ops/gen/common/case_format_test.cc | #include "tensorflow/c/experimental/ops/gen/common/case_format.h"
#include "absl/strings/ascii.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace generator {
namespace {
enum CaseFormatType {
LOWER_CAMEL,
UPPER_CAMEL,
LOWER_SNAKE,
UPPER_SNAKE,
};
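// Converts `str` between camel/snake case forms. Word boundaries are detected
// either by the delimiter (snake-case input) or by uppercase letters
// (camel-case input).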
string FormatStringCase(const string &str, CaseFormatType to,
const char delimiter = '_') {
const bool from_snake = (str == absl::AsciiStrToUpper(str)) ||
(str == absl::AsciiStrToLower(str));
const bool toUpper = (to == UPPER_CAMEL || to == UPPER_SNAKE);
const bool toSnake = (to == LOWER_SNAKE || to == UPPER_SNAKE);
string result;
bool inputStart = true;
bool wordStart = true;
for (const char c : str) {
if (c == delimiter) {
if (wordStart) {
result.push_back(delimiter);
}
wordStart = true;
continue;
}
if (!from_snake && isupper(c)) {
wordStart = true;
}
if (wordStart && toSnake && !inputStart) {
result.push_back(delimiter);
}
const bool shouldCapIfSnake = toUpper;
const bool shouldCapIfCamel = wordStart && (toUpper || !inputStart);
if ((toSnake && shouldCapIfSnake) || (!toSnake && shouldCapIfCamel)) {
result += toupper(c);
} else {
result += tolower(c);
}
wordStart = false;
inputStart = false;
}
if (wordStart) {
result.push_back(delimiter);
}
return result;
}
}
string toLowerCamel(const string &s, const char delimiter) {
return FormatStringCase(s, LOWER_CAMEL, delimiter);
}
string toLowerSnake(const string &s, const char delimiter) {
return FormatStringCase(s, LOWER_SNAKE, delimiter);
}
string toUpperCamel(const string &s, const char delimiter) {
return FormatStringCase(s, UPPER_CAMEL, delimiter);
}
string toUpperSnake(const string &s, const char delimiter) {
return FormatStringCase(s, UPPER_SNAKE, delimiter);
}
}
} | #include "tensorflow/c/experimental/ops/gen/common/case_format.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace generator {
namespace {
struct Variations {
string lower_camel;
string lower_snake;
string upper_camel;
string upper_snake;
};
void TestSingleVariation(const string &str, Variations expected,
char delimiter = '_') {
EXPECT_EQ(expected.lower_camel, toLowerCamel(str, delimiter));
EXPECT_EQ(expected.lower_snake, toLowerSnake(str, delimiter));
EXPECT_EQ(expected.upper_camel, toUpperCamel(str, delimiter));
EXPECT_EQ(expected.upper_snake, toUpperSnake(str, delimiter));
}
void TestAllVariations(Variations variations, char delimiter = '_') {
TestSingleVariation(variations.lower_camel, variations, delimiter);
TestSingleVariation(variations.lower_snake, variations, delimiter);
TestSingleVariation(variations.upper_camel, variations, delimiter);
TestSingleVariation(variations.upper_snake, variations, delimiter);
}
TEST(CppOpGenCaseFormat, test_single_word) {
TestAllVariations(Variations{
"three",
"three",
"Three",
"THREE",
});
}
TEST(CppOpGenCaseFormat, test_complex_string) {
TestAllVariations(Variations{
"threeNTest33Words",
"three_n_test33_words",
"ThreeNTest33Words",
"THREE_N_TEST33_WORDS",
});
}
TEST(CppOpGenCaseFormat, test_hyphen_delimiter) {
TestAllVariations(
Variations{
"threeNTest33Words",
"three-n-test33-words",
"ThreeNTest33Words",
"THREE-N-TEST33-WORDS",
},
'-');
}
TEST(CppOpGenCaseFormat, test_trailing_underscore) {
TestAllVariations(Variations{
"threeNTest33Words_",
"three_n_test33_words_",
"ThreeNTest33Words_",
"THREE_N_TEST33_WORDS_",
});
}
TEST(CppOpGenCaseFormat, test_double_trailing_underscores) {
TestAllVariations(Variations{
"xxY__",
"xx_y__",
"XxY__",
"XX_Y__",
});
}
TEST(CppOpGenCaseFormat, test_leading_underscore) {
TestAllVariations(Variations{
"_threeNTest33Words",
"_three_n_test33_words",
"_ThreeNTest33Words",
"_THREE_N_TEST33_WORDS",
});
}
TEST(CppOpGenCaseFormat, test_double_leading_underscores) {
TestAllVariations(Variations{
"__threeNTest33Words",
"__three_n_test33_words",
"__ThreeNTest33Words",
"__THREE_N_TEST33_WORDS",
});
}
TEST(CppOpGenCaseFormat, test_leading_and_trailing_underscores) {
TestAllVariations(Variations{
"__threeNTest33Words____",
"__three_n_test33_words____",
"__ThreeNTest33Words____",
"__THREE_N_TEST33_WORDS____",
});
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/gen/common/case_format.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/gen/common/case_format_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b306449e-1028-4a65-a9cd-5129cd3fe38b | cpp | abseil/abseil-cpp | cordz_info | absl/strings/internal/cordz_info.cc | absl/strings/internal/cordz_info_test.cc | #include "absl/strings/internal/cordz_info.h"
#include <cstdint>
#include "absl/base/config.h"
#include "absl/base/internal/spinlock.h"
#include "absl/container/inlined_vector.h"
#include "absl/debugging/stacktrace.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_btree.h"
#include "absl/strings/internal/cord_rep_crc.h"
#include "absl/strings/internal/cordz_handle.h"
#include "absl/strings/internal/cordz_statistics.h"
#include "absl/strings/internal/cordz_update_tracker.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/types/span.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr size_t CordzInfo::kMaxStackDepth;
#endif
ABSL_CONST_INIT CordzInfo::List CordzInfo::global_list_{absl::kConstInit};
namespace {
class CordRepAnalyzer {
public:
explicit CordRepAnalyzer(CordzStatistics& statistics)
: statistics_(statistics) {}
void AnalyzeCordRep(const CordRep* rep) {
ABSL_ASSERT(rep != nullptr);
size_t refcount = rep->refcount.Get();
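    // One reference on the root is presumably held for the analysis itself;
    // exclude it from the fair-share computation.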
RepRef repref{rep, (refcount > 1) ? refcount - 1 : 1};
if (repref.tag() == CRC) {
statistics_.node_count++;
statistics_.node_counts.crc++;
memory_usage_.Add(sizeof(CordRepCrc), repref.refcount);
repref = repref.Child(repref.rep->crc()->child);
}
repref = CountLinearReps(repref, memory_usage_);
switch (repref.tag()) {
case CordRepKind::BTREE:
AnalyzeBtree(repref);
break;
default:
ABSL_ASSERT(repref.tag() == CordRepKind::UNUSED_0);
break;
}
statistics_.estimated_memory_usage += memory_usage_.total;
statistics_.estimated_fair_share_memory_usage +=
static_cast<size_t>(memory_usage_.fair_share);
}
private:
struct RepRef {
const CordRep* rep;
size_t refcount;
RepRef Child(const CordRep* child) const {
if (child == nullptr) return RepRef{nullptr, 0};
return RepRef{child, refcount * child->refcount.Get()};
}
constexpr CordRepKind tag() const {
ABSL_ASSERT(rep == nullptr || rep->tag != CordRepKind::UNUSED_0);
return rep ? static_cast<CordRepKind>(rep->tag) : CordRepKind::UNUSED_0;
}
};
struct MemoryUsage {
size_t total = 0;
double fair_share = 0.0;
void Add(size_t size, size_t refcount) {
total += size;
fair_share += static_cast<double>(size) / refcount;
}
};
void CountFlat(size_t size) {
statistics_.node_count++;
statistics_.node_counts.flat++;
if (size <= 64) {
statistics_.node_counts.flat_64++;
} else if (size <= 128) {
statistics_.node_counts.flat_128++;
} else if (size <= 256) {
statistics_.node_counts.flat_256++;
} else if (size <= 512) {
statistics_.node_counts.flat_512++;
} else if (size <= 1024) {
statistics_.node_counts.flat_1k++;
}
}
RepRef CountLinearReps(RepRef rep, MemoryUsage& memory_usage) {
while (rep.tag() == SUBSTRING) {
statistics_.node_count++;
statistics_.node_counts.substring++;
memory_usage.Add(sizeof(CordRepSubstring), rep.refcount);
rep = rep.Child(rep.rep->substring()->child);
}
if (rep.tag() >= FLAT) {
size_t size = rep.rep->flat()->AllocatedSize();
CountFlat(size);
memory_usage.Add(size, rep.refcount);
return RepRef{nullptr, 0};
}
if (rep.tag() == EXTERNAL) {
statistics_.node_count++;
statistics_.node_counts.external++;
size_t size = rep.rep->length + sizeof(CordRepExternalImpl<intptr_t>);
memory_usage.Add(size, rep.refcount);
return RepRef{nullptr, 0};
}
return rep;
}
void AnalyzeBtree(RepRef rep) {
statistics_.node_count++;
statistics_.node_counts.btree++;
memory_usage_.Add(sizeof(CordRepBtree), rep.refcount);
const CordRepBtree* tree = rep.rep->btree();
if (tree->height() > 0) {
for (CordRep* edge : tree->Edges()) {
AnalyzeBtree(rep.Child(edge));
}
} else {
for (CordRep* edge : tree->Edges()) {
CountLinearReps(rep.Child(edge), memory_usage_);
}
}
}
CordzStatistics& statistics_;
MemoryUsage memory_usage_;
};
}
CordzInfo* CordzInfo::Head(const CordzSnapshot& snapshot) {
ABSL_ASSERT(snapshot.is_snapshot());
CordzInfo* head = global_list_.head.load(std::memory_order_acquire);
ABSL_ASSERT(snapshot.DiagnosticsHandleIsSafeToInspect(head));
return head;
}
CordzInfo* CordzInfo::Next(const CordzSnapshot& snapshot) const {
ABSL_ASSERT(snapshot.is_snapshot());
CordzInfo* next = ci_next_.load(std::memory_order_acquire);
ABSL_ASSERT(snapshot.DiagnosticsHandleIsSafeToInspect(this));
ABSL_ASSERT(snapshot.DiagnosticsHandleIsSafeToInspect(next));
return next;
}
void CordzInfo::TrackCord(InlineData& cord, MethodIdentifier method,
int64_t sampling_stride) {
assert(cord.is_tree());
assert(!cord.is_profiled());
CordzInfo* cordz_info =
new CordzInfo(cord.as_tree(), nullptr, method, sampling_stride);
cord.set_cordz_info(cordz_info);
cordz_info->Track();
}
void CordzInfo::TrackCord(InlineData& cord, const InlineData& src,
MethodIdentifier method) {
assert(cord.is_tree());
assert(src.is_tree());
CordzInfo* cordz_info = cord.cordz_info();
if (cordz_info != nullptr) cordz_info->Untrack();
cordz_info = new CordzInfo(cord.as_tree(), src.cordz_info(), method,
src.cordz_info()->sampling_stride());
cord.set_cordz_info(cordz_info);
cordz_info->Track();
}
void CordzInfo::MaybeTrackCordImpl(InlineData& cord, const InlineData& src,
MethodIdentifier method) {
if (src.is_profiled()) {
TrackCord(cord, src, method);
} else if (cord.is_profiled()) {
cord.cordz_info()->Untrack();
cord.clear_cordz_info();
}
}
CordzInfo::MethodIdentifier CordzInfo::GetParentMethod(const CordzInfo* src) {
if (src == nullptr) return MethodIdentifier::kUnknown;
return src->parent_method_ != MethodIdentifier::kUnknown ? src->parent_method_
: src->method_;
}
size_t CordzInfo::FillParentStack(const CordzInfo* src, void** stack) {
assert(stack);
if (src == nullptr) return 0;
if (src->parent_stack_depth_) {
memcpy(stack, src->parent_stack_, src->parent_stack_depth_ * sizeof(void*));
return src->parent_stack_depth_;
}
memcpy(stack, src->stack_, src->stack_depth_ * sizeof(void*));
return src->stack_depth_;
}
CordzInfo::CordzInfo(CordRep* rep, const CordzInfo* src,
MethodIdentifier method, int64_t sampling_stride)
: rep_(rep),
stack_depth_(
static_cast<size_t>(absl::GetStackTrace(stack_,
kMaxStackDepth,
1))),
parent_stack_depth_(FillParentStack(src, parent_stack_)),
method_(method),
parent_method_(GetParentMethod(src)),
create_time_(absl::Now()),
sampling_stride_(sampling_stride) {
update_tracker_.LossyAdd(method);
if (src) {
update_tracker_.LossyAdd(src->update_tracker_);
}
}
CordzInfo::~CordzInfo() {
if (ABSL_PREDICT_FALSE(rep_)) {
CordRep::Unref(rep_);
}
}
void CordzInfo::Track() {
SpinLockHolder l(&list_->mutex);
CordzInfo* const head = list_->head.load(std::memory_order_acquire);
if (head != nullptr) {
head->ci_prev_.store(this, std::memory_order_release);
}
ci_next_.store(head, std::memory_order_release);
list_->head.store(this, std::memory_order_release);
}
void CordzInfo::Untrack() {
ODRCheck();
{
SpinLockHolder l(&list_->mutex);
CordzInfo* const head = list_->head.load(std::memory_order_acquire);
CordzInfo* const next = ci_next_.load(std::memory_order_acquire);
CordzInfo* const prev = ci_prev_.load(std::memory_order_acquire);
if (next) {
ABSL_ASSERT(next->ci_prev_.load(std::memory_order_acquire) == this);
next->ci_prev_.store(prev, std::memory_order_release);
}
if (prev) {
ABSL_ASSERT(head != this);
ABSL_ASSERT(prev->ci_next_.load(std::memory_order_acquire) == this);
prev->ci_next_.store(next, std::memory_order_release);
} else {
ABSL_ASSERT(head == this);
list_->head.store(next, std::memory_order_release);
}
}
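  // If no CordzSnapshot may still be inspecting this entry, delete it
  // immediately; otherwise keep the rep alive and defer deletion to the
  // CordzHandle delete queue.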
if (SafeToDelete()) {
UnsafeSetCordRep(nullptr);
delete this;
return;
}
{
absl::MutexLock lock(&mutex_);
if (rep_) CordRep::Ref(rep_);
}
CordzHandle::Delete(this);
}
void CordzInfo::Lock(MethodIdentifier method)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mutex_) {
mutex_.Lock();
update_tracker_.LossyAdd(method);
assert(rep_);
}
void CordzInfo::Unlock() ABSL_UNLOCK_FUNCTION(mutex_) {
bool tracked = rep_ != nullptr;
mutex_.Unlock();
if (!tracked) {
Untrack();
}
}
absl::Span<void* const> CordzInfo::GetStack() const {
return absl::MakeConstSpan(stack_, stack_depth_);
}
absl::Span<void* const> CordzInfo::GetParentStack() const {
return absl::MakeConstSpan(parent_stack_, parent_stack_depth_);
}
CordzStatistics CordzInfo::GetCordzStatistics() const {
CordzStatistics stats;
stats.method = method_;
stats.parent_method = parent_method_;
stats.update_tracker = update_tracker_;
if (CordRep* rep = RefCordRep()) {
stats.size = rep->length;
CordRepAnalyzer analyzer(stats);
analyzer.AnalyzeCordRep(rep);
CordRep::Unref(rep);
}
return stats;
}
}
ABSL_NAMESPACE_END
} | #include "absl/strings/internal/cordz_info.h"
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/debugging/stacktrace.h"
#include "absl/debugging/symbolize.h"
#include "absl/strings/cordz_test_helpers.h"
#include "absl/strings/internal/cord_rep_flat.h"
#include "absl/strings/internal/cordz_handle.h"
#include "absl/strings/internal/cordz_statistics.h"
#include "absl/strings/internal/cordz_update_tracker.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
namespace {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::Ne;
using ::testing::SizeIs;
auto constexpr kUnknownMethod = CordzUpdateTracker::kUnknown;
auto constexpr kTrackCordMethod = CordzUpdateTracker::kConstructorString;
auto constexpr kChildMethod = CordzUpdateTracker::kConstructorCord;
auto constexpr kUpdateMethod = CordzUpdateTracker::kAppendString;
std::vector<const CordzHandle*> DeleteQueue() {
return CordzHandle::DiagnosticsGetDeleteQueue();
}
std::string FormatStack(absl::Span<void* const> raw_stack) {
static constexpr size_t buf_size = 1 << 14;
std::unique_ptr<char[]> buf(new char[buf_size]);
std::string output;
for (void* stackp : raw_stack) {
if (absl::Symbolize(stackp, buf.get(), buf_size)) {
absl::StrAppend(&output, " ", buf.get(), "\n");
}
}
return output;
}
TEST(CordzInfoTest, TrackCord) {
TestCordData data;
CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
CordzInfo* info = data.data.cordz_info();
ASSERT_THAT(info, Ne(nullptr));
EXPECT_FALSE(info->is_snapshot());
EXPECT_THAT(CordzInfo::Head(CordzSnapshot()), Eq(info));
EXPECT_THAT(info->GetCordRepForTesting(), Eq(data.rep.rep));
info->Untrack();
}
TEST(CordzInfoTest, MaybeTrackChildCordWithoutSampling) {
CordzSamplingIntervalHelper sample_none(99999);
TestCordData parent, child;
CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));
}
TEST(CordzInfoTest, MaybeTrackChildCordWithSampling) {
CordzSamplingIntervalHelper sample_all(1);
TestCordData parent, child;
CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));
}
TEST(CordzInfoTest, MaybeTrackChildCordWithoutSamplingParentSampled) {
CordzSamplingIntervalHelper sample_none(99999);
TestCordData parent, child;
CordzInfo::TrackCord(parent.data, kTrackCordMethod, 1);
CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
CordzInfo* parent_info = parent.data.cordz_info();
CordzInfo* child_info = child.data.cordz_info();
ASSERT_THAT(child_info, Ne(nullptr));
EXPECT_THAT(child_info->GetCordRepForTesting(), Eq(child.rep.rep));
EXPECT_THAT(child_info->GetParentStack(), parent_info->GetStack());
parent_info->Untrack();
child_info->Untrack();
}
TEST(CordzInfoTest, MaybeTrackChildCordWithoutSamplingChildSampled) {
CordzSamplingIntervalHelper sample_none(99999);
TestCordData parent, child;
CordzInfo::TrackCord(child.data, kTrackCordMethod, 1);
CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));
}
TEST(CordzInfoTest, MaybeTrackChildCordWithSamplingChildSampled) {
CordzSamplingIntervalHelper sample_all(1);
TestCordData parent, child;
CordzInfo::TrackCord(child.data, kTrackCordMethod, 1);
CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));
}
TEST(CordzInfoTest, UntrackCord) {
TestCordData data;
CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
CordzInfo* info = data.data.cordz_info();
info->Untrack();
EXPECT_THAT(DeleteQueue(), SizeIs(0u));
}
TEST(CordzInfoTest, UntrackCordWithSnapshot) {
TestCordData data;
CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
CordzInfo* info = data.data.cordz_info();
CordzSnapshot snapshot;
info->Untrack();
EXPECT_THAT(CordzInfo::Head(CordzSnapshot()), Eq(nullptr));
EXPECT_THAT(info->GetCordRepForTesting(), Eq(data.rep.rep));
EXPECT_THAT(DeleteQueue(), ElementsAre(info, &snapshot));
}
TEST(CordzInfoTest, SetCordRep) {
TestCordData data;
CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
CordzInfo* info = data.data.cordz_info();
TestCordRep rep;
info->Lock(CordzUpdateTracker::kAppendCord);
info->SetCordRep(rep.rep);
info->Unlock();
EXPECT_THAT(info->GetCordRepForTesting(), Eq(rep.rep));
info->Untrack();
}
TEST(CordzInfoTest, SetCordRepNullUntracksCordOnUnlock) {
TestCordData data;
CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
CordzInfo* info = data.data.cordz_info();
info->Lock(CordzUpdateTracker::kAppendString);
info->SetCordRep(nullptr);
EXPECT_THAT(info->GetCordRepForTesting(), Eq(nullptr));
EXPECT_THAT(CordzInfo::Head(CordzSnapshot()), Eq(info));
info->Unlock();
EXPECT_THAT(CordzInfo::Head(CordzSnapshot()), Eq(nullptr));
}
TEST(CordzInfoTest, RefCordRep) {
TestCordData data;
CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
CordzInfo* info = data.data.cordz_info();
size_t refcount = data.rep.rep->refcount.Get();
EXPECT_THAT(info->RefCordRep(), Eq(data.rep.rep));
EXPECT_THAT(data.rep.rep->refcount.Get(), Eq(refcount + 1));
CordRep::Unref(data.rep.rep);
info->Untrack();
}
#if GTEST_HAS_DEATH_TEST
TEST(CordzInfoTest, SetCordRepRequiresMutex) {
TestCordData data;
CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
CordzInfo* info = data.data.cordz_info();
TestCordRep rep;
EXPECT_DEBUG_DEATH(info->SetCordRep(rep.rep), ".*");
info->Untrack();
}
#endif
TEST(CordzInfoTest, TrackUntrackHeadFirstV2) {
CordzSnapshot snapshot;
EXPECT_THAT(CordzInfo::Head(snapshot), Eq(nullptr));
TestCordData data;
CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
CordzInfo* info1 = data.data.cordz_info();
ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info1));
EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));
TestCordData data2;
CordzInfo::TrackCord(data2.data, kTrackCordMethod, 1);
CordzInfo* info2 = data2.data.cordz_info();
ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info2));
EXPECT_THAT(info2->Next(snapshot), Eq(info1));
EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));
info2->Untrack();
ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info1));
EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));
info1->Untrack();
ASSERT_THAT(CordzInfo::Head(snapshot), Eq(nullptr));
}
TEST(CordzInfoTest, TrackUntrackTailFirstV2) {
CordzSnapshot snapshot;
EXPECT_THAT(CordzInfo::Head(snapshot), Eq(nullptr));
TestCordData data;
CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
CordzInfo* info1 = data.data.cordz_info();
ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info1));
EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));
TestCordData data2;
CordzInfo::TrackCord(data2.data, kTrackCordMethod, 1);
CordzInfo* info2 = data2.data.cordz_info();
ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info2));
EXPECT_THAT(info2->Next(snapshot), Eq(info1));
EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));
info1->Untrack();
ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info2));
EXPECT_THAT(info2->Next(snapshot), Eq(nullptr));
info2->Untrack();
ASSERT_THAT(CordzInfo::Head(snapshot), Eq(nullptr));
}
TEST(CordzInfoTest, StackV2) {
TestCordData data;
static constexpr int kMaxStackDepth = 50;
CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
CordzInfo* info = data.data.cordz_info();
std::vector<void*> local_stack;
local_stack.resize(kMaxStackDepth);
local_stack.resize(static_cast<size_t>(
absl::GetStackTrace(local_stack.data(), kMaxStackDepth,
1)));
std::string got_stack = FormatStack(info->GetStack());
std::string expected_stack = FormatStack(local_stack);
EXPECT_THAT(got_stack, HasSubstr(expected_stack));
info->Untrack();
}
CordzInfo* TrackChildCord(InlineData& data, const InlineData& parent) {
CordzInfo::TrackCord(data, parent, kChildMethod);
return data.cordz_info();
}
CordzInfo* TrackParentCord(InlineData& data) {
CordzInfo::TrackCord(data, kTrackCordMethod, 1);
return data.cordz_info();
}
TEST(CordzInfoTest, GetStatistics) {
TestCordData data;
CordzInfo* info = TrackParentCord(data.data);
CordzStatistics statistics = info->GetCordzStatistics();
EXPECT_THAT(statistics.size, Eq(data.rep.rep->length));
EXPECT_THAT(statistics.method, Eq(kTrackCordMethod));
EXPECT_THAT(statistics.parent_method, Eq(kUnknownMethod));
EXPECT_THAT(statistics.update_tracker.Value(kTrackCordMethod), Eq(1));
info->Untrack();
}
TEST(CordzInfoTest, LockCountsMethod) {
TestCordData data;
CordzInfo* info = TrackParentCord(data.data);
info->Lock(kUpdateMethod);
info->Unlock();
info->Lock(kUpdateMethod);
info->Unlock();
CordzStatistics statistics = info->GetCordzStatistics();
EXPECT_THAT(statistics.update_tracker.Value(kUpdateMethod), Eq(2));
info->Untrack();
}
TEST(CordzInfoTest, FromParent) {
TestCordData parent;
TestCordData child;
CordzInfo* info_parent = TrackParentCord(parent.data);
CordzInfo* info_child = TrackChildCord(child.data, parent.data);
std::string stack = FormatStack(info_parent->GetStack());
std::string parent_stack = FormatStack(info_child->GetParentStack());
EXPECT_THAT(stack, Eq(parent_stack));
CordzStatistics statistics = info_child->GetCordzStatistics();
EXPECT_THAT(statistics.size, Eq(child.rep.rep->length));
EXPECT_THAT(statistics.method, Eq(kChildMethod));
EXPECT_THAT(statistics.parent_method, Eq(kTrackCordMethod));
EXPECT_THAT(statistics.update_tracker.Value(kChildMethod), Eq(1));
info_parent->Untrack();
info_child->Untrack();
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cordz_info.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cordz_info_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
b838e648-4281-4f28-8219-c370a373b8cf | cpp | google/quiche | btree_scheduler | quiche/common/btree_scheduler.h | quiche/common/btree_scheduler_test.cc | #ifndef QUICHE_COMMON_BTREE_SCHEDULER_H_
#define QUICHE_COMMON_BTREE_SCHEDULER_H_
#include <cstddef>
#include <limits>
#include <optional>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/container/btree_map.h"
#include "absl/container/node_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_export.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace quiche {
template <typename Id, typename Priority>
class QUICHE_NO_EXPORT BTreeScheduler {
public:
bool HasRegistered() const { return !streams_.empty(); }
bool HasScheduled() const { return !schedule_.empty(); }
size_t NumScheduled() const { return schedule_.size(); }
size_t NumRegistered() const { return streams_.size(); }
size_t NumScheduledInPriorityRange(std::optional<Priority> min,
std::optional<Priority> max) const;
absl::StatusOr<bool> ShouldYield(Id id) const;
std::optional<Priority> GetPriorityFor(Id id) const {
auto it = streams_.find(id);
if (it == streams_.end()) {
return std::nullopt;
}
return it->second.priority;
}
absl::StatusOr<Id> PopFront();
absl::Status Register(Id stream_id, const Priority& priority);
absl::Status Unregister(Id stream_id);
absl::Status UpdatePriority(Id stream_id, const Priority& new_priority);
absl::Status Schedule(Id stream_id);
bool IsScheduled(Id stream_id) const;
private:
struct StreamEntry {
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Priority priority;
std::optional<int> current_sequence_number = std::nullopt;
bool scheduled() const { return current_sequence_number.has_value(); }
};
using FullStreamEntry = std::pair<const Id, StreamEntry>;
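  // Schedule keys sort higher priorities first; within a priority,
  // earlier-scheduled streams (which received larger sequence numbers, since
  // the counter decreases) come first, giving FIFO order.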
struct ScheduleKey {
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Priority priority;
int sequence_number;
bool operator<(const ScheduleKey& other) const {
return std::make_tuple(priority, sequence_number) >
std::make_tuple(other.priority, other.sequence_number);
}
static ScheduleKey MinForPriority(Priority priority) {
return ScheduleKey{priority, std::numeric_limits<int>::max()};
}
static ScheduleKey MaxForPriority(Priority priority) {
return ScheduleKey{priority, std::numeric_limits<int>::min()};
}
};
using FullScheduleEntry = std::pair<const ScheduleKey, FullStreamEntry*>;
using ScheduleIterator =
typename absl::btree_map<ScheduleKey, FullStreamEntry*>::const_iterator;
static Id StreamId(const FullScheduleEntry& entry) {
return entry.second->first;
}
absl::StatusOr<FullScheduleEntry> DescheduleStream(const StreamEntry& entry);
absl::node_hash_map<Id, StreamEntry> streams_;
absl::btree_map<ScheduleKey, FullStreamEntry*> schedule_;
int current_write_sequence_number_ = 0;
};
template <typename Id, typename Priority>
size_t BTreeScheduler<Id, Priority>::NumScheduledInPriorityRange(
std::optional<Priority> min, std::optional<Priority> max) const {
if (min.has_value() && max.has_value()) {
QUICHE_DCHECK(*min <= *max);
}
ScheduleIterator begin =
max.has_value() ? schedule_.lower_bound(ScheduleKey::MinForPriority(*max))
: schedule_.begin();
ScheduleIterator end =
min.has_value() ? schedule_.upper_bound(ScheduleKey::MaxForPriority(*min))
: schedule_.end();
return end - begin;
}
template <typename Id, typename Priority>
absl::Status BTreeScheduler<Id, Priority>::Register(Id stream_id,
const Priority& priority) {
auto [it, success] = streams_.insert({stream_id, StreamEntry{priority}});
if (!success) {
return absl::AlreadyExistsError("ID already registered");
}
return absl::OkStatus();
}
template <typename Id, typename Priority>
auto BTreeScheduler<Id, Priority>::DescheduleStream(const StreamEntry& entry)
-> absl::StatusOr<FullScheduleEntry> {
QUICHE_DCHECK(entry.scheduled());
auto it = schedule_.find(
ScheduleKey{entry.priority, *entry.current_sequence_number});
if (it == schedule_.end()) {
return absl::InternalError(
"Calling DescheduleStream() on an entry that is not in the schedule at "
"the expected key.");
}
FullScheduleEntry result = *it;
schedule_.erase(it);
return result;
}
template <typename Id, typename Priority>
absl::Status BTreeScheduler<Id, Priority>::Unregister(Id stream_id) {
auto it = streams_.find(stream_id);
if (it == streams_.end()) {
return absl::NotFoundError("Stream not registered");
}
const StreamEntry& stream = it->second;
if (stream.scheduled()) {
if (!DescheduleStream(stream).ok()) {
QUICHE_BUG(BTreeSchedule_Unregister_NotInSchedule)
<< "UnregisterStream() called on a stream ID " << stream_id
<< ", which is marked ready, but is not in the schedule";
}
}
streams_.erase(it);
return absl::OkStatus();
}
template <typename Id, typename Priority>
absl::Status BTreeScheduler<Id, Priority>::UpdatePriority(
Id stream_id, const Priority& new_priority) {
auto it = streams_.find(stream_id);
if (it == streams_.end()) {
return absl::NotFoundError("ID not registered");
}
StreamEntry& stream = it->second;
std::optional<int> sequence_number;
if (stream.scheduled()) {
absl::StatusOr<FullScheduleEntry> old_entry = DescheduleStream(stream);
if (old_entry.ok()) {
sequence_number = old_entry->first.sequence_number;
QUICHE_DCHECK_EQ(old_entry->second, &*it);
} else {
QUICHE_BUG(BTreeScheduler_Update_Not_In_Schedule)
<< "UpdatePriority() called on a stream ID " << stream_id
<< ", which is marked ready, but is not in the schedule";
}
}
stream.priority = new_priority;
if (sequence_number.has_value()) {
schedule_.insert({ScheduleKey{stream.priority, *sequence_number}, &*it});
}
return absl::OkStatus();
}
template <typename Id, typename Priority>
absl::StatusOr<bool> BTreeScheduler<Id, Priority>::ShouldYield(
Id stream_id) const {
const auto stream_it = streams_.find(stream_id);
if (stream_it == streams_.end()) {
return absl::NotFoundError("ID not registered");
}
const StreamEntry& stream = stream_it->second;
if (schedule_.empty()) {
return false;
}
const FullScheduleEntry& next = *schedule_.begin();
if (StreamId(next) == stream_id) {
return false;
}
return next.first.priority >= stream.priority;
}
template <typename Id, typename Priority>
absl::StatusOr<Id> BTreeScheduler<Id, Priority>::PopFront() {
if (schedule_.empty()) {
return absl::NotFoundError("No streams scheduled");
}
auto schedule_it = schedule_.begin();
QUICHE_DCHECK(schedule_it->second->second.scheduled());
schedule_it->second->second.current_sequence_number = std::nullopt;
Id result = StreamId(*schedule_it);
schedule_.erase(schedule_it);
return result;
}
template <typename Id, typename Priority>
absl::Status BTreeScheduler<Id, Priority>::Schedule(Id stream_id) {
const auto stream_it = streams_.find(stream_id);
if (stream_it == streams_.end()) {
return absl::NotFoundError("ID not registered");
}
if (stream_it->second.scheduled()) {
return absl::OkStatus();
}
auto [schedule_it, success] =
schedule_.insert({ScheduleKey{stream_it->second.priority,
--current_write_sequence_number_},
&*stream_it});
QUICHE_BUG_IF(WebTransportWriteBlockedList_AddStream_conflict, !success)
<< "Conflicting key in scheduler for stream " << stream_id;
stream_it->second.current_sequence_number =
schedule_it->first.sequence_number;
return absl::OkStatus();
}
template <typename Id, typename Priority>
bool BTreeScheduler<Id, Priority>::IsScheduled(Id stream_id) const {
const auto stream_it = streams_.find(stream_id);
if (stream_it == streams_.end()) {
return false;
}
return stream_it->second.scheduled();
}
}
#endif |
#include "quiche/common/btree_scheduler.h"
#include <optional>
#include <ostream>
#include <string>
#include <tuple>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace quiche::test {
namespace {
using ::testing::ElementsAre;
using ::testing::Optional;
template <typename Id, typename Priority>
void ScheduleIds(BTreeScheduler<Id, Priority>& scheduler,
absl::Span<const Id> ids) {
for (Id id : ids) {
QUICHE_EXPECT_OK(scheduler.Schedule(id));
}
}
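// Drains the scheduler by calling PopFront() until it reports kNotFound,
// returning the IDs in the order they were popped.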
template <typename Id, typename Priority>
std::vector<Id> PopAll(BTreeScheduler<Id, Priority>& scheduler) {
std::vector<Id> result;
result.reserve(scheduler.NumScheduled());
for (;;) {
absl::StatusOr<Id> id = scheduler.PopFront();
if (id.ok()) {
result.push_back(*id);
} else {
EXPECT_THAT(id, StatusIs(absl::StatusCode::kNotFound));
break;
}
}
return result;
}
TEST(BTreeSchedulerTest, SimplePop) {
BTreeScheduler<int, int> scheduler;
QUICHE_EXPECT_OK(scheduler.Register(1, 100));
QUICHE_EXPECT_OK(scheduler.Register(2, 101));
QUICHE_EXPECT_OK(scheduler.Register(3, 102));
EXPECT_THAT(scheduler.GetPriorityFor(1), Optional(100));
EXPECT_THAT(scheduler.GetPriorityFor(3), Optional(102));
EXPECT_EQ(scheduler.GetPriorityFor(5), std::nullopt);
EXPECT_EQ(scheduler.NumScheduled(), 0u);
EXPECT_FALSE(scheduler.HasScheduled());
QUICHE_EXPECT_OK(scheduler.Schedule(1));
QUICHE_EXPECT_OK(scheduler.Schedule(2));
QUICHE_EXPECT_OK(scheduler.Schedule(3));
EXPECT_EQ(scheduler.NumScheduled(), 3u);
EXPECT_TRUE(scheduler.HasScheduled());
EXPECT_THAT(scheduler.PopFront(), IsOkAndHolds(3));
EXPECT_THAT(scheduler.PopFront(), IsOkAndHolds(2));
EXPECT_THAT(scheduler.PopFront(), IsOkAndHolds(1));
QUICHE_EXPECT_OK(scheduler.Schedule(2));
QUICHE_EXPECT_OK(scheduler.Schedule(1));
QUICHE_EXPECT_OK(scheduler.Schedule(3));
EXPECT_THAT(scheduler.PopFront(), IsOkAndHolds(3));
EXPECT_THAT(scheduler.PopFront(), IsOkAndHolds(2));
EXPECT_THAT(scheduler.PopFront(), IsOkAndHolds(1));
QUICHE_EXPECT_OK(scheduler.Schedule(3));
QUICHE_EXPECT_OK(scheduler.Schedule(1));
EXPECT_THAT(scheduler.PopFront(), IsOkAndHolds(3));
EXPECT_THAT(scheduler.PopFront(), IsOkAndHolds(1));
}
TEST(BTreeSchedulerTest, FIFO) {
BTreeScheduler<int, int> scheduler;
QUICHE_EXPECT_OK(scheduler.Register(1, 100));
QUICHE_EXPECT_OK(scheduler.Register(2, 100));
QUICHE_EXPECT_OK(scheduler.Register(3, 100));
ScheduleIds(scheduler, {2, 1, 3});
EXPECT_THAT(PopAll(scheduler), ElementsAre(2, 1, 3));
QUICHE_EXPECT_OK(scheduler.Register(4, 101));
QUICHE_EXPECT_OK(scheduler.Register(5, 99));
ScheduleIds(scheduler, {5, 1, 2, 3, 4});
EXPECT_THAT(PopAll(scheduler), ElementsAre(4, 1, 2, 3, 5));
ScheduleIds(scheduler, {1, 5, 2, 4, 3});
EXPECT_THAT(PopAll(scheduler), ElementsAre(4, 1, 2, 3, 5));
ScheduleIds(scheduler, {3, 5, 2, 4, 1});
EXPECT_THAT(PopAll(scheduler), ElementsAre(4, 3, 2, 1, 5));
ScheduleIds(scheduler, {3, 2, 1, 2, 3});
EXPECT_THAT(PopAll(scheduler), ElementsAre(3, 2, 1));
}
TEST(BTreeSchedulerTest, NumEntriesInRange) {
BTreeScheduler<int, int> scheduler;
QUICHE_EXPECT_OK(scheduler.Register(1, 0));
QUICHE_EXPECT_OK(scheduler.Register(2, 0));
QUICHE_EXPECT_OK(scheduler.Register(3, 0));
QUICHE_EXPECT_OK(scheduler.Register(4, -2));
QUICHE_EXPECT_OK(scheduler.Register(5, -5));
QUICHE_EXPECT_OK(scheduler.Register(6, 10));
QUICHE_EXPECT_OK(scheduler.Register(7, 16));
QUICHE_EXPECT_OK(scheduler.Register(8, 32));
QUICHE_EXPECT_OK(scheduler.Register(9, 64));
EXPECT_EQ(scheduler.NumScheduled(), 0u);
EXPECT_EQ(scheduler.NumScheduledInPriorityRange(std::nullopt, std::nullopt),
0u);
EXPECT_EQ(scheduler.NumScheduledInPriorityRange(-1, 1), 0u);
for (int stream = 1; stream <= 9; ++stream) {
QUICHE_ASSERT_OK(scheduler.Schedule(stream));
}
EXPECT_EQ(scheduler.NumScheduled(), 9u);
EXPECT_EQ(scheduler.NumScheduledInPriorityRange(std::nullopt, std::nullopt),
9u);
EXPECT_EQ(scheduler.NumScheduledInPriorityRange(0, 0), 3u);
EXPECT_EQ(scheduler.NumScheduledInPriorityRange(std::nullopt, -1), 2u);
EXPECT_EQ(scheduler.NumScheduledInPriorityRange(1, std::nullopt), 4u);
}
TEST(BTreeSchedulerTest, Registration) {
BTreeScheduler<int, int> scheduler;
QUICHE_EXPECT_OK(scheduler.Register(1, 0));
QUICHE_EXPECT_OK(scheduler.Register(2, 0));
QUICHE_EXPECT_OK(scheduler.Schedule(1));
QUICHE_EXPECT_OK(scheduler.Schedule(2));
EXPECT_EQ(scheduler.NumScheduled(), 2u);
EXPECT_TRUE(scheduler.IsScheduled(2));
EXPECT_THAT(scheduler.Register(2, 0),
StatusIs(absl::StatusCode::kAlreadyExists));
QUICHE_EXPECT_OK(scheduler.Unregister(2));
EXPECT_EQ(scheduler.NumScheduled(), 1u);
EXPECT_FALSE(scheduler.IsScheduled(2));
EXPECT_THAT(scheduler.UpdatePriority(2, 1234),
StatusIs(absl::StatusCode::kNotFound));
EXPECT_THAT(scheduler.Unregister(2), StatusIs(absl::StatusCode::kNotFound));
EXPECT_THAT(scheduler.Schedule(2), StatusIs(absl::StatusCode::kNotFound));
QUICHE_EXPECT_OK(scheduler.Register(2, 0));
EXPECT_EQ(scheduler.NumScheduled(), 1u);
EXPECT_TRUE(scheduler.IsScheduled(1));
EXPECT_FALSE(scheduler.IsScheduled(2));
}
TEST(BTreeSchedulerTest, UpdatePriorityUp) {
BTreeScheduler<int, int> scheduler;
QUICHE_EXPECT_OK(scheduler.Register(1, 0));
QUICHE_EXPECT_OK(scheduler.Register(2, 0));
QUICHE_EXPECT_OK(scheduler.Register(3, 0));
ScheduleIds(scheduler, {1, 2, 3});
QUICHE_EXPECT_OK(scheduler.UpdatePriority(2, 1000));
EXPECT_THAT(PopAll(scheduler), ElementsAre(2, 1, 3));
}
TEST(BTreeSchedulerTest, UpdatePriorityDown) {
BTreeScheduler<int, int> scheduler;
QUICHE_EXPECT_OK(scheduler.Register(1, 0));
QUICHE_EXPECT_OK(scheduler.Register(2, 0));
QUICHE_EXPECT_OK(scheduler.Register(3, 0));
ScheduleIds(scheduler, {1, 2, 3});
QUICHE_EXPECT_OK(scheduler.UpdatePriority(2, -1000));
EXPECT_THAT(PopAll(scheduler), ElementsAre(1, 3, 2));
}
TEST(BTreeSchedulerTest, UpdatePriorityEqual) {
BTreeScheduler<int, int> scheduler;
QUICHE_EXPECT_OK(scheduler.Register(1, 0));
QUICHE_EXPECT_OK(scheduler.Register(2, 0));
QUICHE_EXPECT_OK(scheduler.Register(3, 0));
ScheduleIds(scheduler, {1, 2, 3});
QUICHE_EXPECT_OK(scheduler.UpdatePriority(2, 0));
EXPECT_THAT(PopAll(scheduler), ElementsAre(1, 2, 3));
}
TEST(BTreeSchedulerTest, UpdatePriorityIntoSameBucket) {
BTreeScheduler<int, int> scheduler;
QUICHE_EXPECT_OK(scheduler.Register(1, 0));
QUICHE_EXPECT_OK(scheduler.Register(2, -100));
QUICHE_EXPECT_OK(scheduler.Register(3, 0));
ScheduleIds(scheduler, {1, 2, 3});
QUICHE_EXPECT_OK(scheduler.UpdatePriority(2, 0));
EXPECT_THAT(PopAll(scheduler), ElementsAre(1, 2, 3));
}
TEST(BTreeSchedulerTest, ShouldYield) {
BTreeScheduler<int, int> scheduler;
QUICHE_EXPECT_OK(scheduler.Register(10, 100));
QUICHE_EXPECT_OK(scheduler.Register(20, 101));
QUICHE_EXPECT_OK(scheduler.Register(21, 101));
QUICHE_EXPECT_OK(scheduler.Register(30, 102));
EXPECT_THAT(scheduler.ShouldYield(10), IsOkAndHolds(false));
EXPECT_THAT(scheduler.ShouldYield(20), IsOkAndHolds(false));
EXPECT_THAT(scheduler.ShouldYield(21), IsOkAndHolds(false));
EXPECT_THAT(scheduler.ShouldYield(30), IsOkAndHolds(false));
EXPECT_THAT(scheduler.ShouldYield(40), StatusIs(absl::StatusCode::kNotFound));
QUICHE_EXPECT_OK(scheduler.Schedule(20));
EXPECT_THAT(scheduler.ShouldYield(10), IsOkAndHolds(true));
EXPECT_THAT(scheduler.ShouldYield(20), IsOkAndHolds(false));
EXPECT_THAT(scheduler.ShouldYield(21), IsOkAndHolds(true));
EXPECT_THAT(scheduler.ShouldYield(30), IsOkAndHolds(false));
}
struct CustomPriority {
int a;
int b;
bool operator<(const CustomPriority& other) const {
return std::make_tuple(a, b) < std::make_tuple(other.a, other.b);
}
};
TEST(BTreeSchedulerTest, CustomPriority) {
BTreeScheduler<int, CustomPriority> scheduler;
QUICHE_EXPECT_OK(scheduler.Register(10, CustomPriority{0, 1}));
QUICHE_EXPECT_OK(scheduler.Register(11, CustomPriority{0, 0}));
QUICHE_EXPECT_OK(scheduler.Register(12, CustomPriority{0, 0}));
QUICHE_EXPECT_OK(scheduler.Register(13, CustomPriority{10, 0}));
QUICHE_EXPECT_OK(scheduler.Register(14, CustomPriority{-10, 0}));
ScheduleIds(scheduler, {10, 11, 12, 13, 14});
EXPECT_THAT(PopAll(scheduler), ElementsAre(13, 10, 11, 12, 14));
}
struct CustomId {
int a;
std::string b;
bool operator==(const CustomId& other) const {
return a == other.a && b == other.b;
}
template <typename H>
friend H AbslHashValue(H h, const CustomId& c) {
return H::combine(std::move(h), c.a, c.b);
}
};
std::ostream& operator<<(std::ostream& os, const CustomId& id) {
os << id.a << ":" << id.b;
return os;
}
TEST(BTreeSchedulerTest, CustomIds) {
BTreeScheduler<CustomId, int> scheduler;
QUICHE_EXPECT_OK(scheduler.Register(CustomId{1, "foo"}, 10));
QUICHE_EXPECT_OK(scheduler.Register(CustomId{1, "bar"}, 12));
QUICHE_EXPECT_OK(scheduler.Register(CustomId{2, "foo"}, 11));
EXPECT_THAT(scheduler.Register(CustomId{1, "foo"}, 10),
StatusIs(absl::StatusCode::kAlreadyExists));
ScheduleIds(scheduler,
{CustomId{1, "foo"}, CustomId{1, "bar"}, CustomId{2, "foo"}});
EXPECT_THAT(scheduler.ShouldYield(CustomId{1, "foo"}), IsOkAndHolds(true));
EXPECT_THAT(scheduler.ShouldYield(CustomId{1, "bar"}), IsOkAndHolds(false));
EXPECT_THAT(
PopAll(scheduler),
ElementsAre(CustomId{1, "bar"}, CustomId{2, "foo"}, CustomId{1, "foo"}));
}
}
} |
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/btree_scheduler.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/btree_scheduler_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
7bd23866-5039-481b-92b9-5f3198d60d37 | cpp | tensorflow/tensorflow | reshapex4 | tensorflow/lite/delegates/gpu/common/tasks/reshapex4.cc | tensorflow/lite/delegates/gpu/cl/kernels/reshapex4_test.cc |
#include "tensorflow/lite/delegates/gpu/common/tasks/reshapex4.h"
#include <string>
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
namespace tflite {
namespace gpu {
namespace {
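// Emits the Reshapex4 kernel source: every destination (batch, x, y, slice)
// cell is linearized into a single BHWC4-style index, which is then decomposed
// back into source-tensor coordinates, so the reshape reduces to one read and
// one write per 4-channel slice.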
std::string GetReshapeCode(const OperationDef& op_def) {
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
}
c += " int Y = GLOBAL_ID_1;\n";
c += " int Z = GLOBAL_ID_2;\n";
c += " if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() || "
"Z >= args.dst_tensor.Slices()) { \n";
c += " return; \n";
c += " } \n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int dst_bhwc4 = B;\n";
} else {
c += " int dst_bhwc4 = 0;\n";
}
c += " dst_bhwc4 = ((dst_bhwc4 * args.dst_tensor.Height() + Y) * "
"args.dst_tensor.Width() + X) * args.dst_tensor.Slices() + Z;\n";
c += " int src_z = dst_bhwc4 % args.src_tensor.Slices();\n";
c += " dst_bhwc4 = dst_bhwc4 / args.src_tensor.Slices();\n";
c += " int src_x = dst_bhwc4 % args.src_tensor.Width();\n";
c += " dst_bhwc4 = dst_bhwc4 / args.src_tensor.Width();\n";
c += " int src_y = dst_bhwc4 % args.src_tensor.Height();\n";
if (op_def.src_tensors[0].HasAxis(Axis::BATCH)) {
c += " int src_b = dst_bhwc4 / args.src_tensor.Height();\n";
c += " args.src_tensor.SetBatchRef(src_b);\n";
}
c += " args.src_tensor::type result = args.src_tensor.Read(src_x, src_y, "
"src_z);\n";
c += " args.dst_tensor.Write(result, X, Y, Z);\n";
c += "}\n";
return c;
}
}
GPUOperation CreateReshapex4(const OperationDef& definition) {
GPUOperation op(definition);
op.AddSrcTensor("src_tensor", definition.src_tensors[0]);
op.AddDstTensor("dst_tensor", definition.dst_tensors[0]);
op.code_ = GetReshapeCode(definition);
op.tensor_to_grid_ = TensorToGrid::kWBToX_HDToY_SToZ;
return op;
}
}
} |
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/reshape_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, Reshapex4) {
auto status = Reshapex4Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} |
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/reshapex4.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/reshapex4_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
06a605f6-b2f0-43a5-adfd-11795f4448f4 | cpp | tensorflow/tensorflow | scatter | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/scatter.cc | third_party/xla/xla/service/gpu/fusions/legacy/scatter_test.cc |
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/scatter.h"
#include <cstdint>
#include <type_traits>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/util.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir {
namespace odml {
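// Brings the scatter updates into canonical form: if update_window_dims are
// the leading dimensions of `updates`, a transpose is inserted so that they
// become the trailing dimensions; the rewrite fails if the dims are neither
// already canonical (trailing) nor a leading iota.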
LogicalResult CanonicalizeScatterUpdates(
Operation* scatter_op, llvm::ArrayRef<int64_t> update_window_dims,
const Value& indices, const ShapedType& indices_type, Value& updates,
ShapedType& updates_type, ConversionPatternRewriter& rewriter) {
auto canonical_update_window_dims = llvm::to_vector(
llvm::seq<int64_t>(indices_type.getRank() - 1, updates_type.getRank()));
if (canonical_update_window_dims == update_window_dims) return success();
if (!IsIotaAttr(update_window_dims, update_window_dims.size()))
return rewriter.notifyMatchFailure(
scatter_op, "update_window_dims are not leading or trailing indices");
SmallVector<int64_t, 4> permutation_array(updates_type.getRank());
int64_t dim = 0;
const auto permutation_array_size = permutation_array.size();
for (int64_t i = update_window_dims.size(); i < permutation_array_size; ++i) {
permutation_array[i] = dim;
++dim;
}
for (int64_t i = 0; i < update_window_dims.size(); ++i) {
permutation_array[i] = dim;
++dim;
}
auto permutation_and_shape = GetPermutationAndTransposedShape(
permutation_array, updates_type, rewriter);
auto transposed_updates = rewriter.create<mhlo::TransposeOp>(
scatter_op->getLoc(), permutation_and_shape.shape, updates,
permutation_and_shape.permutation);
updates = transposed_updates;
updates_type = permutation_and_shape.shape;
return success();
}
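// Lowers mhlo.scatter to the corresponding TF scatter op. Only
// single-operand/single-update scatters with static shapes and a recognized
// binary update computation are matched; when the scatter dimension numbers
// are not a leading iota, the operand is transposed into position before the
// TF op and transposed back afterwards.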
template <typename BinaryOp, typename TfOp>
LogicalResult ConvertScatterOp<BinaryOp, TfOp>::matchAndRewrite(
mhlo::ScatterOp scatter_op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
OperandRange operands = scatter_op.getInputs();
Value indices = scatter_op.getScatterIndices();
OperandRange updates = scatter_op.getUpdates();
if (operands.size() != 1 || updates.size() != 1) return failure();
ShapedType operand_type = mlir::cast<ShapedType>(operands[0].getType());
ShapedType indices_type = mlir::cast<ShapedType>(indices.getType());
ShapedType updates_type = mlir::cast<ShapedType>(updates[0].getType());
Value new_updates = updates[0];
if (!operand_type.hasStaticShape() || !indices_type.hasStaticShape() ||
!updates_type.hasStaticShape()) {
return failure();
}
if (failed(MatchBinaryReduceFunction<BinaryOp>(
scatter_op.getUpdateComputation()))) {
return failure();
}
auto scatter_dimension_numbers = scatter_op.getScatterDimensionNumbers();
int64_t index_vector_dim = scatter_dimension_numbers.getIndexVectorDim();
if (failed(NormalizeIndexVector(scatter_op, indices, indices_type,
index_vector_dim, rewriter))) {
return failure();
}
auto update_window_dims = scatter_dimension_numbers.getUpdateWindowDims();
if (failed(CanonicalizeScatterUpdates(scatter_op, update_window_dims, indices,
indices_type, new_updates, updates_type,
rewriter))) {
return failure();
}
auto inserted_window_dims = scatter_dimension_numbers.getInsertedWindowDims();
auto scatter_dims_to_operand_dims =
scatter_dimension_numbers.getScatterDimsToOperandDims();
if (IsIotaAttr(inserted_window_dims, indices_type.getShape().back()) &&
IsIotaAttr(scatter_dims_to_operand_dims,
indices_type.getShape().back())) {
rewriter.replaceOpWithNewOp<TfOp>(scatter_op,
scatter_op.getResult(0).getType(),
operands[0], indices, new_updates);
return success();
}
if (scatter_dims_to_operand_dims != inserted_window_dims) {
return rewriter.notifyMatchFailure(
scatter_op, "unsupported scatter_dims_to_operand_dims");
}
SmallVector<int64_t, 4> permutation_array;
for (int64_t i = 0; i < scatter_dims_to_operand_dims.size(); ++i) {
permutation_array.push_back(scatter_dims_to_operand_dims[i]);
}
for (int64_t i = 0; i < operand_type.getRank(); ++i) {
if (!llvm::is_contained(scatter_dims_to_operand_dims, i)) {
permutation_array.push_back(i);
}
}
auto permutation_and_shape = GetPermutationAndTransposedShape(
permutation_array, operand_type, rewriter);
Location loc = scatter_op.getLoc();
auto transposed_operand = rewriter.create<mhlo::TransposeOp>(
loc, permutation_and_shape.shape, operands[0],
permutation_and_shape.permutation);
Value new_indices = indices;
int64_t index_depth =
permutation_and_shape.shape.getRank() - inserted_window_dims.size();
int64_t num_updates = indices_type.getDimSize(0);
if (std::is_same<TfOp, TF::TensorScatterUpdateOp>::value &&
indices_type.getRank() == 1 && updates_type.getRank() == 1 &&
index_depth == 1 && num_updates == 1) {
ImplicitLocOpBuilder builder(loc, rewriter);
auto indices_shape = BuildIntArrayConstOp(
builder, rewriter,
llvm::SmallVector<int64_t>({num_updates, index_depth}),
rewriter.getI32Type());
new_indices = rewriter.create<TF::ReshapeOp>(
loc,
RankedTensorType::get({num_updates, index_depth},
indices_type.getElementType()),
indices, indices_shape);
auto updates_shape = BuildIntArrayConstOp(
builder, rewriter,
llvm::SmallVector<int64_t>({num_updates, updates_type.getDimSize(0)}),
rewriter.getI32Type());
new_updates = rewriter.create<TF::ReshapeOp>(
loc,
RankedTensorType::get({1, updates_type.getDimSize(0)},
updates_type.getElementType()),
new_updates, updates_shape);
}
auto tf_scatter_op =
rewriter.create<TfOp>(loc, permutation_and_shape.shape,
transposed_operand, new_indices, new_updates);
auto inverse_permutation = GetInversePermutation(permutation_array, rewriter);
rewriter.replaceOpWithNewOp<mhlo::TransposeOp>(
scatter_op, scatter_op.getResult(0).getType(), tf_scatter_op,
inverse_permutation);
return success();
}
}
} |
#include "xla/service/gpu/fusions/legacy/scatter.h"
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/IR/MLIRContext.h"
#include "xla/service/gpu/fusions/fusions.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/model/indexing_map_serialization.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
class ScatterFusionTest : public HloTestBase {
DebugOptions GetDebugOptionsForTest() override {
auto opts = HloTestBase::GetDebugOptionsForTest();
opts.set_xla_gpu_mlir_emitter_level(0);
return opts;
}
protected:
mlir::MLIRContext mlir_context_;
};
TEST_F(ScatterFusionTest, ScatterFusion) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
fused_computation {
%input = f32[2,9] parameter(0)
%indices = s32[3] parameter(1)
%updates = f32[3,9] parameter(2)
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
ENTRY entry {
%input = f32[2,9] parameter(0)
%indices = s32[3] parameter(1)
%updates = f32[3,9] parameter(2)
ROOT %fusion = f32[2,9] fusion(%input, %indices, %updates), kind=kLoop, calls=fused_computation
})")
.value();
stream_executor::DeviceDescription device_info =
TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis_fused = HloFusionAnalysis::Create(*root, device_info);
auto emitter =
GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis_fused});
auto scatter_fusion = dynamic_cast<ScatterFusion*>(emitter.get());
ASSERT_NE(scatter_fusion, nullptr);
EXPECT_EQ(scatter_fusion->launch_dimensions().launch_bound(),
3 * 9);
}
TEST_F(ScatterFusionTest, ThreadIdIndexing) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule module
computation {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
%p2 = f32[] parameter(2)
%p3 = f32[] parameter(3)
ROOT %tuple = (f32[], f32[]) tuple(f32[] %p2, f32[] %p3)
}
scatter {
%operand0 = f32[300,200] parameter(0)
%operand1 = f32[300,200] parameter(1)
%indices = s32[42,1] parameter(2)
%update.1 = f32[42,10,20] parameter(3)
%update.2 = f32[42,10,20] parameter(4)
ROOT %scatter = (f32[300,200], f32[300,200]) scatter(
f32[300,200] %operand0,
f32[300,200] %operand1,
s32[42,1] %indices,
f32[42,10,20] %update.1,
f32[42,10,20] %update.2
),
update_window_dims={1,2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
to_apply=computation
}
ENTRY entry {
%operand0 = f32[300,200] parameter(0)
%operand1 = f32[300,200] parameter(1)
%indices = s32[42,1] parameter(2)
%update.1 = f32[42,10,20] parameter(3)
%update.2 = f32[42,10,20] parameter(4)
ROOT %fusion = (f32[300,200], f32[300,200]) fusion(
%operand0, %operand1, %indices, %update.1, %update.2),
kind=kLoop, calls=scatter
}
)"));
stream_executor::DeviceDescription device_info =
TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis_fused = HloFusionAnalysis::Create(*root, device_info);
auto emitter =
GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis_fused});
auto fusion = dynamic_cast<ScatterFusion*>(emitter.get());
ASSERT_NE(fusion, nullptr);
constexpr auto kUpdatesIndexing = R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (
(bl_x * 128 + th_x) floordiv 200,
((bl_x * 128 + th_x) floordiv 20) mod 10,
(bl_x * 128 + th_x) mod 20
),
domain:
th_x in [0, 127],
th_y in [0, 0],
th_z in [0, 0],
bl_x in [0, 65],
bl_y in [0, 0],
bl_z in [0, 0],
chunk_id in [0, 0],
unroll_id in [0, 0],
bl_x * 128 + th_x in [0, 8399]
)";
mlir::SmallVector<std::string> dim_names = {"th_x", "th_y", "th_z",
"bl_x", "bl_y", "bl_z"};
mlir::SmallVector<std::string> range_names = {"chunk_id", "unroll_id"};
EXPECT_THAT(
ToString(*fusion->ComputeThreadIdToInputIndexing(
0, 3, &mlir_context_),
dim_names, range_names, {}),
MatchIndexingString(kUpdatesIndexing));
EXPECT_THAT(
ToString(*fusion->ComputeThreadIdToInputIndexing(
0, 4, &mlir_context_),
dim_names, range_names, {}),
MatchIndexingString(kUpdatesIndexing));
EXPECT_THAT(
ToString(*fusion->ComputeThreadIdToInputIndexing(
1, 3, &mlir_context_),
dim_names, range_names, {}),
MatchIndexingString(kUpdatesIndexing));
EXPECT_THAT(
ToString(*fusion->ComputeThreadIdToInputIndexing(
1, 4, &mlir_context_),
dim_names, range_names, {}),
MatchIndexingString(kUpdatesIndexing));
range_names.push_back("index_id");
constexpr auto kIndicesIndexing = R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id, index_id] ->
((bl_x * 128 + th_x) floordiv 200, 0),
domain:
th_x in [0, 127],
th_y in [0, 0],
th_z in [0, 0],
bl_x in [0, 65],
bl_y in [0, 0],
bl_z in [0, 0],
chunk_id in [0, 0],
unroll_id in [0, 0],
index_id in [0, 0],
bl_x * 128 + th_x in [0, 8399]
)";
EXPECT_THAT(
ToString(*fusion->ComputeThreadIdToInputIndexing(
0, 2, &mlir_context_),
dim_names, range_names, {}),
MatchIndexingString(kIndicesIndexing));
EXPECT_THAT(
ToString(*fusion->ComputeThreadIdToInputIndexing(
1, 2, &mlir_context_),
dim_names, range_names, {}),
MatchIndexingString(kIndicesIndexing));
}
}
}
} |
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/scatter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/legacy/scatter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
c1f8f33e-1147-4fa1-b5d6-6457a9b6e08e | cpp | tensorflow/tensorflow | flatbuffer_conversions | tensorflow/lite/core/api/flatbuffer_conversions.cc | tensorflow/lite/core/api/flatbuffer_conversions_test.cc |
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include "flatbuffers/vector.h"
#include "tensorflow/compiler/mlir/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
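// Wraps a BuiltinDataAllocator behind a std::unique_ptr-based interface whose
// deleter calls Deallocate(), so that partially parsed builtin data cannot
// leak if parsing fails before the pointer is release()d into *builtin_data.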
class SafeBuiltinDataAllocator {
public:
class BuiltinDataDeleter {
public:
explicit BuiltinDataDeleter(BuiltinDataAllocator* allocator)
: allocator_(allocator) {}
void operator()(void* data) { allocator_->Deallocate(data); }
private:
BuiltinDataAllocator* allocator_;
};
template <typename T>
using BuiltinDataPtr = std::unique_ptr<T, BuiltinDataDeleter>;
explicit SafeBuiltinDataAllocator(BuiltinDataAllocator* allocator)
: allocator_(allocator) {}
template <typename T>
BuiltinDataPtr<T> Allocate() {
return BuiltinDataPtr<T>(allocator_->AllocatePOD<T>(),
BuiltinDataDeleter(allocator_));
}
private:
BuiltinDataAllocator* allocator_;
};
void CheckParsePointerParams(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
TFLITE_DCHECK(op != nullptr);
TFLITE_DCHECK(error_reporter != nullptr);
TFLITE_DCHECK(allocator != nullptr);
TFLITE_DCHECK(builtin_data != nullptr);
}
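// Copies a flatbuffer vector of integers into a caller-provided buffer,
// reporting an error if the vector is missing or holds more elements than
// max_size_of_buffer allows.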
template <typename DataType = int32_t>
static TfLiteStatus FlatBufferIntVectorToArray(
int max_size_of_buffer, const flatbuffers::Vector<DataType>* flat_vector,
DataType* buffer, ErrorReporter* error_reporter, const char* op_name) {
if (!flat_vector) {
TF_LITE_REPORT_ERROR(error_reporter,
"Input array not provided for operation '%s'.\n",
op_name);
return kTfLiteError;
} else {
size_t num_dimensions = flat_vector->size();
if (num_dimensions > max_size_of_buffer / sizeof(DataType)) {
TF_LITE_REPORT_ERROR(
error_reporter,
"Found too many dimensions in the input array of operation '%s'.\n",
op_name);
return kTfLiteError;
} else {
for (size_t i = 0; i < num_dimensions; ++i) {
buffer[i] = flat_vector->Get(i);
}
}
}
return kTfLiteOk;
}
TfLiteFusedActivation ConvertActivation(ActivationFunctionType activation) {
switch (activation) {
case ActivationFunctionType_NONE:
return kTfLiteActNone;
case ActivationFunctionType_RELU:
return kTfLiteActRelu;
case ActivationFunctionType_RELU_N1_TO_1:
return kTfLiteActReluN1To1;
case ActivationFunctionType_RELU6:
return kTfLiteActRelu6;
case ActivationFunctionType_TANH:
return kTfLiteActTanh;
case ActivationFunctionType_SIGN_BIT:
return kTfLiteActSignBit;
}
return kTfLiteActNone;
}
TfLitePadding ConvertPadding(Padding padding) {
switch (padding) {
case Padding_SAME:
return kTfLitePaddingSame;
case Padding_VALID:
return kTfLitePaddingValid;
}
return kTfLitePaddingUnknown;
}
TfLiteMirrorPaddingMode ConvertMirrorPadding(MirrorPadMode padding) {
switch (padding) {
case MirrorPadMode_REFLECT:
return kTfLiteMirrorPaddingReflect;
case MirrorPadMode_SYMMETRIC:
return kTfLiteMirrorPaddingSymmetric;
}
return kTfLiteMirrorPaddingUnknown;
}
TfLiteRngAlgorithm ConvertRngAlgorithm(RngAlgorithm algorithm) {
switch (algorithm) {
case RngAlgorithm_THREEFRY:
return kTfLiteRngAlgorithmThreefry;
case RngAlgorithm_PHILOX:
return kTfLiteRngAlgorithmPhilox;
case RngAlgorithm_DEFAULT:
return kTfLiteRngAlgorithmDefault;
}
return kTfLiteRngAlgorithmUnknown;
}
#ifndef TF_LITE_STATIC_MEMORY
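// Central dispatch for builtin options: forwards to the per-op ParseXxx()
// helpers where they exist and parses the remaining builtin options inline.
// Excluded from TF_LITE_STATIC_MEMORY builds.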
TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
auto parseLSHProjectionType = [](LSHProjectionType type) {
switch (type) {
case LSHProjectionType_SPARSE:
return kTfLiteLshProjectionSparse;
case LSHProjectionType_DENSE:
return kTfLiteLshProjectionDense;
default:
return kTfLiteLshProjectionUnknown;
}
};
auto parseCombinerType = [](CombinerType type) {
switch (type) {
case CombinerType_MEAN:
return kTfLiteCombinerTypeMean;
case CombinerType_SQRTN:
return kTfLiteCombinerTypeSqrtn;
case CombinerType_SUM:
default:
return kTfLiteCombinerTypeSum;
}
};
SafeBuiltinDataAllocator safe_allocator(allocator);
*builtin_data = nullptr;
switch (op_type) {
case BuiltinOperator_ABS: {
return ParseAbs(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ADD: {
return ParseAdd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ADD_N: {
return ParseAddN(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ARG_MAX: {
return ParseArgMax(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ARG_MIN: {
return ParseArgMin(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ASSIGN_VARIABLE: {
return ParseAssignVariable(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_AVERAGE_POOL_2D: {
return ParsePool(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_BATCH_MATMUL: {
return ParseBatchMatMul(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_BATCH_TO_SPACE_ND: {
return ParseBatchToSpaceNd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_BROADCAST_ARGS: {
return ParseBroadcastArgs(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_BROADCAST_TO: {
return ParseBroadcastTo(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CALL_ONCE: {
return ParseCallOnce(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CEIL: {
return ParseCeil(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CONCATENATION: {
return ParseConcatenation(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CONV_2D: {
return ParseConv2D(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CUMSUM: {
return ParseCumsum(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_DEPTH_TO_SPACE: {
return ParseDepthToSpace(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_DEPTHWISE_CONV_2D: {
return ParseDepthwiseConv2D(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_DEQUANTIZE: {
return ParseDequantize(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_DIV: {
return ParseDiv(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ELU: {
return ParseElu(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_EMBEDDING_LOOKUP: {
return ParseEmbeddingLookup(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_EXP: {
return ParseExp(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_EXPAND_DIMS: {
return ParseExpandDims(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FILL: {
return ParseFill(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FLOOR: {
return ParseFloor(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FLOOR_DIV: {
return ParseFloorDiv(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FLOOR_MOD: {
return ParseFloorMod(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FULLY_CONNECTED: {
return ParseFullyConnected(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_GATHER_ND: {
return ParseGatherNd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_GREATER: {
return ParseGreater(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_GREATER_EQUAL: {
return ParseGreaterEqual(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_HARD_SWISH: {
return ParseHardSwish(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_L2_NORMALIZATION: {
return ParseL2Normalization(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_L2_POOL_2D: {
return ParsePool(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LEAKY_RELU: {
return ParseLeakyRelu(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LESS: {
return ParseLess(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LESS_EQUAL: {
return ParseLessEqual(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LOG: {
return ParseLog(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LOGICAL_AND: {
return ParseLogicalAnd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LOGICAL_NOT: {
return ParseLogicalNot(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LOGICAL_OR: {
return ParseLogicalOr(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LOGISTIC: {
return ParseLogistic(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LOG_SOFTMAX: {
return ParseLogSoftmax(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LSTM: {
return ParseLSTM(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_MAXIMUM: {
return ParseMaximum(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_MAX_POOL_2D: {
return ParsePool(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_MIRROR_PAD: {
return ParseMirrorPad(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_MEAN: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_MINIMUM: {
return ParseMinimum(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_MUL: {
return ParseMul(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_NEG: {
return ParseNeg(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_NOT_EQUAL: {
return ParseNotEqual(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_PACK: {
return ParsePack(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_PAD: {
return ParsePad(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_PADV2: {
return ParsePadV2(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_POW: {
return ParsePow(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_PRELU: {
return ParsePrelu(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_QUANTIZE: {
return ParseQuantize(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_READ_VARIABLE: {
return ParseReadVariable(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_REDUCE_ANY: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_REDUCE_ALL: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_REDUCE_MAX: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_REDUCE_MIN: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_REDUCE_PROD: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_RELU: {
return ParseRelu(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_RELU6: {
return ParseRelu6(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_RESHAPE: {
return ParseReshape(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_RESIZE_BILINEAR: {
return ParseResizeBilinear(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: {
return ParseResizeNearestNeighbor(op, error_reporter, allocator,
builtin_data);
}
case BuiltinOperator_ROUND: {
return ParseRound(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_RSQRT: {
return ParseRsqrt(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SELECT_V2: {
return ParseSelectV2(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SHAPE: {
return ParseShape(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SIN: {
return ParseSin(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SOFTMAX: {
return ParseSoftmax(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SPACE_TO_BATCH_ND: {
return ParseSpaceToBatchNd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SPACE_TO_DEPTH: {
return ParseSpaceToDepth(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SPLIT: {
return ParseSplit(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SPLIT_V: {
return ParseSplitV(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SQRT: {
return ParseSqrt(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SQUARE: {
return ParseSquare(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SQUARED_DIFFERENCE: {
return ParseSquaredDifference(op, error_reporter, allocator,
builtin_data);
}
case BuiltinOperator_SQUEEZE: {
return ParseSqueeze(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_STRIDED_SLICE: {
return ParseStridedSlice(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SUB: {
return ParseSub(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SUM: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SVDF: {
return ParseSvdf(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_TANH: {
return ParseTanh(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_TRANSPOSE_CONV: {
return ParseTransposeConv(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_UNPACK: {
return ParseUnpack(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_VAR_HANDLE: {
return ParseVarHandle(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ZEROS_LIKE: {
return ParseZerosLike(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_BITWISE_XOR: {
return ParseBitwiseXor(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_RIGHT_SHIFT: {
return ParseRightShift(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CAST: {
return ParseCast(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LSH_PROJECTION: {
auto params = safe_allocator.Allocate<TfLiteLSHProjectionParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* lshParams =
op->builtin_options_as_LSHProjectionOptions()) {
params->type = parseLSHProjectionType(lshParams->type());
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN: {
auto params = safe_allocator.Allocate<TfLiteSequenceRNNParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* sequence_rnn_params =
op->builtin_options_as_SequenceRNNOptions()) {
params->activation =
ConvertActivation(sequence_rnn_params->fused_activation_function());
params->time_major = sequence_rnn_params->time_major();
params->asymmetric_quantize_inputs =
sequence_rnn_params->asymmetric_quantize_inputs();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN: {
auto params =
safe_allocator.Allocate<TfLiteBidirectionalSequenceRNNParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* bidi_sequence_rnn_params =
op->builtin_options_as_BidirectionalSequenceRNNOptions()) {
params->activation = ConvertActivation(
bidi_sequence_rnn_params->fused_activation_function());
params->time_major = bidi_sequence_rnn_params->time_major();
params->merge_outputs = bidi_sequence_rnn_params->merge_outputs();
params->asymmetric_quantize_inputs =
bidi_sequence_rnn_params->asymmetric_quantize_inputs();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_RNN: {
auto params = safe_allocator.Allocate<TfLiteRNNParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* rnn_params = op->builtin_options_as_RNNOptions()) {
params->activation =
ConvertActivation(rnn_params->fused_activation_function());
params->asymmetric_quantize_inputs =
rnn_params->asymmetric_quantize_inputs();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_EMBEDDING_LOOKUP_SPARSE: {
auto params =
safe_allocator.Allocate<TfLiteEmbeddingLookupSparseParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* embedding_params =
op->builtin_options_as_EmbeddingLookupSparseOptions()) {
params->combiner = parseCombinerType(embedding_params->combiner());
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_HASHTABLE_LOOKUP:
return kTfLiteOk;
case BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION: {
auto params = safe_allocator.Allocate<TfLiteLocalResponseNormParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params =
op->builtin_options_as_LocalResponseNormalizationOptions()) {
params->radius = schema_params->radius();
params->bias = schema_params->bias();
params->alpha = schema_params->alpha();
params->beta = schema_params->beta();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM: {
return ParseUnidirectionalSequenceLSTM(op, error_reporter, allocator,
builtin_data);
}
case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM: {
auto params =
safe_allocator.Allocate<TfLiteBidirectionalSequenceLSTMParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* bidi_lstm_params =
op->builtin_options_as_BidirectionalSequenceLSTMOptions()) {
params->activation =
ConvertActivation(bidi_lstm_params->fused_activation_function());
params->cell_clip = bidi_lstm_params->cell_clip();
params->proj_clip = bidi_lstm_params->proj_clip();
params->merge_outputs = bidi_lstm_params->merge_outputs();
params->time_major = bidi_lstm_params->time_major();
params->asymmetric_quantize_inputs =
bidi_lstm_params->asymmetric_quantize_inputs();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_SKIP_GRAM: {
auto params = safe_allocator.Allocate<TfLiteSkipGramParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* skip_gram_params =
op->builtin_options_as_SkipGramOptions()) {
params->ngram_size = skip_gram_params->ngram_size();
params->max_skip_size = skip_gram_params->max_skip_size();
params->include_all_ngrams = skip_gram_params->include_all_ngrams();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_GATHER: {
return ParseGather(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SPARSE_TO_DENSE: {
auto params = safe_allocator.Allocate<TfLiteSparseToDenseParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* sparse_to_dense_params =
op->builtin_options_as_SparseToDenseOptions()) {
params->validate_indices = sparse_to_dense_params->validate_indices();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_DELEGATE: {
TF_LITE_REPORT_ERROR(error_reporter,
"DELEGATE op shouldn't exist in model.");
return kTfLiteError;
}
case BuiltinOperator_FAKE_QUANT: {
auto params = safe_allocator.Allocate<TfLiteFakeQuantParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params =
op->builtin_options_as_FakeQuantOptions()) {
params->min = schema_params->min();
params->max = schema_params->max();
params->num_bits = schema_params->num_bits();
params->narrow_range = schema_params->narrow_range();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_ONE_HOT: {
auto params = safe_allocator.Allocate<TfLiteOneHotParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params = op->builtin_options_as_OneHotOptions()) {
params->axis = schema_params->axis();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_UNIQUE: {
auto params = safe_allocator.Allocate<TfLiteUniqueParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const auto* unique_params = op->builtin_options_as_UniqueOptions();
if (unique_params != nullptr) {
params->index_out_type =
unique_params->idx_out_type() == tflite::TensorType_INT64
? TfLiteType::kTfLiteInt64
: TfLiteType::kTfLiteInt32;
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_REVERSE_SEQUENCE: {
auto params = safe_allocator.Allocate<TfLiteReverseSequenceParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* reverse_seq_params =
op->builtin_options_as_ReverseSequenceOptions()) {
params->seq_dim = reverse_seq_params->seq_dim();
params->batch_dim = reverse_seq_params->batch_dim();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_IF: {
auto params = safe_allocator.Allocate<TfLiteIfParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* if_params = op->builtin_options_as_IfOptions()) {
params->then_subgraph_index = if_params->then_subgraph_index();
params->else_subgraph_index = if_params->else_subgraph_index();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_WHILE: {
auto params = safe_allocator.Allocate<TfLiteWhileParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* while_params = op->builtin_options_as_WhileOptions()) {
params->cond_subgraph_index = while_params->cond_subgraph_index();
params->body_subgraph_index = while_params->body_subgraph_index();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_CONV_3D:
case BuiltinOperator_CONV_3D_TRANSPOSE: {
auto params = safe_allocator.Allocate<TfLiteConv3DParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* conv3d_params = op->builtin_options_as_Conv3DOptions()) {
params->padding = ConvertPadding(conv3d_params->padding());
params->activation =
ConvertActivation(conv3d_params->fused_activation_function());
params->stride_depth = conv3d_params->stride_d();
params->stride_height = conv3d_params->stride_h();
params->stride_width = conv3d_params->stride_w();
params->dilation_depth_factor = conv3d_params->dilation_d_factor();
params->dilation_height_factor = conv3d_params->dilation_h_factor();
params->dilation_width_factor = conv3d_params->dilation_w_factor();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_HASHTABLE: {
auto params = safe_allocator.Allocate<TfLiteHashtableParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* hashtable_params =
op->builtin_options_as_HashtableOptions()) {
params->table_id = hashtable_params->table_id();
TF_LITE_ENSURE_STATUS(ConvertTensorType(
hashtable_params->key_dtype(), ¶ms->key_dtype, error_reporter));
TF_LITE_ENSURE_STATUS(ConvertTensorType(hashtable_params->value_dtype(),
¶ms->value_dtype,
error_reporter));
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_MULTINOMIAL: {
auto params = safe_allocator.Allocate<TfLiteRandomParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* multinomial_params =
op->builtin_options_as_RandomOptions()) {
params->seed = multinomial_params->seed();
params->seed2 = multinomial_params->seed2();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_RANDOM_STANDARD_NORMAL: {
auto params = safe_allocator.Allocate<TfLiteRandomParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* random_std_normal_params =
op->builtin_options_as_RandomOptions()) {
params->seed = random_std_normal_params->seed();
params->seed2 = random_std_normal_params->seed2();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_BUCKETIZE: {
auto params = safe_allocator.Allocate<TfLiteBucketizeParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* bucketize_params =
op->builtin_options_as_BucketizeOptions()) {
const flatbuffers::Vector<float>* boundaries =
bucketize_params->boundaries();
if (boundaries == nullptr) {
TF_LITE_REPORT_ERROR(
error_reporter,
"boundaries array not provided for operation 'bucketize'.\n");
return kTfLiteError;
}
params->num_boundaries = boundaries->size();
if (boundaries->data() == nullptr) {
TF_LITE_REPORT_ERROR(error_reporter,
"boundaries.data() returned nullptr for "
"operation 'bucketize'.\n");
return kTfLiteError;
}
params->boundaries = boundaries->data();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_RANDOM_UNIFORM: {
auto params = safe_allocator.Allocate<TfLiteRandomParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* random_uniform_params =
op->builtin_options_as_RandomOptions()) {
params->seed = random_uniform_params->seed();
params->seed2 = random_uniform_params->seed2();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_GELU: {
auto params = safe_allocator.Allocate<TfLiteGeluParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* gelu_params = op->builtin_options_as_GeluOptions()) {
params->approximate = gelu_params->approximate();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_STABLEHLO_SCATTER: {
return ParseStablehloScatter(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_STABLEHLO_RNG_BIT_GENERATOR: {
return ParseStablehloRngBitGenerator(op, error_reporter, allocator,
builtin_data);
}
case BuiltinOperator_STABLEHLO_GATHER: {
return ParseStablehloGather(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_STABLEHLO_REDUCE_WINDOW: {
return ParseStablehloReduceWindow(op, error_reporter, allocator,
builtin_data);
}
case BuiltinOperator_REDUCE_WINDOW: {
auto params = safe_allocator.Allocate<TfLiteReduceWindowParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* reduce_params =
op->builtin_options_2_as_ReduceWindowOptions()) {
switch (reduce_params->reduce_function()) {
case ReduceWindowFunction_ADD:
params->reduce_function = TfLiteReduceWindowFunctionAdd;
break;
case ReduceWindowFunction_MUL:
params->reduce_function = TfLiteReduceWindowFunctionMul;
break;
case ReduceWindowFunction_MINIMUM:
params->reduce_function = TfLiteReduceWindowFunctionMin;
break;
case ReduceWindowFunction_MAXIMUM:
params->reduce_function = TfLiteReduceWindowFunctionMax;
break;
case ReduceWindowFunction_ALL:
params->reduce_function = TfLiteReduceWindowFunctionAll;
break;
case ReduceWindowFunction_ANY:
params->reduce_function = TfLiteReduceWindowFunctionAny;
break;
case ReduceWindowFunction_UNSUPPORTED:
default:
return kTfLiteError;
}
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_STABLEHLO_PAD: {
return ParseStablehloPad(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_STABLEHLO_COMPOSITE: {
return ParseStablehloComposite(op, error_reporter, allocator,
builtin_data);
}
case BuiltinOperator_STABLEHLO_SHIFT_LEFT: {
return ParseStablehloShiftLeft(op, error_reporter, allocator,
builtin_data);
}
case BuiltinOperator_STABLEHLO_SLICE:
case BuiltinOperator_STABLEHLO_BROADCAST_IN_DIM:
case BuiltinOperator_STABLEHLO_CONVOLUTION:
case BuiltinOperator_STABLEHLO_LOGISTIC:
case BuiltinOperator_STABLEHLO_ADD:
case BuiltinOperator_STABLEHLO_DIVIDE:
case BuiltinOperator_STABLEHLO_MULTIPLY:
case BuiltinOperator_STABLEHLO_MAXIMUM:
case BuiltinOperator_STABLEHLO_RESHAPE:
case BuiltinOperator_STABLEHLO_CLAMP:
case BuiltinOperator_STABLEHLO_CONCATENATE:
case BuiltinOperator_STABLEHLO_CUSTOM_CALL:
case BuiltinOperator_STABLEHLO_REDUCE:
case BuiltinOperator_STABLEHLO_ABS:
case BuiltinOperator_STABLEHLO_AND:
case BuiltinOperator_STABLEHLO_COSINE:
case BuiltinOperator_STABLEHLO_EXPONENTIAL:
case BuiltinOperator_STABLEHLO_FLOOR:
case BuiltinOperator_STABLEHLO_LOG:
case BuiltinOperator_STABLEHLO_MINIMUM:
case BuiltinOperator_STABLEHLO_NEGATE:
case BuiltinOperator_STABLEHLO_OR:
case BuiltinOperator_STABLEHLO_POWER:
case BuiltinOperator_STABLEHLO_REMAINDER:
case BuiltinOperator_STABLEHLO_RSQRT:
case BuiltinOperator_STABLEHLO_SELECT:
case BuiltinOperator_STABLEHLO_SUBTRACT:
case BuiltinOperator_STABLEHLO_TANH:
case BuiltinOperator_STABLEHLO_DYNAMIC_SLICE:
case BuiltinOperator_STABLEHLO_DYNAMIC_UPDATE_SLICE:
case BuiltinOperator_STABLEHLO_IOTA:
case BuiltinOperator_STABLEHLO_COMPARE:
case BuiltinOperator_STABLEHLO_CONVERT:
case BuiltinOperator_STABLEHLO_DOT_GENERAL:
case BuiltinOperator_STABLEHLO_SORT:
case BuiltinOperator_STABLEHLO_WHILE:
case BuiltinOperator_STABLEHLO_TRANSPOSE:
case BuiltinOperator_STABLEHLO_CBRT:
case BuiltinOperator_CALL:
case BuiltinOperator_COMPLEX_ABS:
case BuiltinOperator_CONCAT_EMBEDDINGS:
case BuiltinOperator_COS:
case BuiltinOperator_CUSTOM:
case BuiltinOperator_DENSIFY:
case BuiltinOperator_DYNAMIC_UPDATE_SLICE:
case BuiltinOperator_EQUAL:
case BuiltinOperator_HASHTABLE_FIND:
case BuiltinOperator_HASHTABLE_IMPORT:
case BuiltinOperator_HASHTABLE_SIZE:
case BuiltinOperator_IMAG:
case BuiltinOperator_MATRIX_DIAG:
case BuiltinOperator_MATRIX_SET_DIAG:
case BuiltinOperator_NON_MAX_SUPPRESSION_V4:
case BuiltinOperator_NON_MAX_SUPPRESSION_V5:
case BuiltinOperator_RELU_N1_TO_1:
case BuiltinOperator_RELU_0_TO_1:
case BuiltinOperator_SCATTER_ND:
case BuiltinOperator_SELECT:
case BuiltinOperator_SLICE:
case BuiltinOperator_TILE:
case BuiltinOperator_TOPK_V2:
case BuiltinOperator_TRANSPOSE:
case BuiltinOperator_RANGE:
case BuiltinOperator_RANK:
case BuiltinOperator_REAL:
case BuiltinOperator_RFFT2D:
case BuiltinOperator_SEGMENT_SUM:
case BuiltinOperator_REVERSE_V2:
case BuiltinOperator_UNSORTED_SEGMENT_MAX:
case BuiltinOperator_UNSORTED_SEGMENT_MIN:
case BuiltinOperator_UNSORTED_SEGMENT_PROD:
case BuiltinOperator_UNSORTED_SEGMENT_SUM:
case BuiltinOperator_ATAN2:
case BuiltinOperator_SIGN:
case BuiltinOperator_BITCAST:
case BuiltinOperator_WHERE:
case BuiltinOperator_DILATE:
return kTfLiteOk;
case BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES:
return kTfLiteError;
}
return kTfLiteError;
}
#endif  // !defined(TF_LITE_STATIC_MEMORY)
}  // namespace
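// Maps a flatbuffer TensorType onto the corresponding TfLiteType, reporting an
// error for tensor types the runtime does not support.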
TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
ErrorReporter* error_reporter) {
switch (tensor_type) {
case TensorType_FLOAT16:
*type = kTfLiteFloat16;
return kTfLiteOk;
case TensorType_BFLOAT16:
*type = kTfLiteBFloat16;
return kTfLiteOk;
case TensorType_FLOAT32:
*type = kTfLiteFloat32;
return kTfLiteOk;
case TensorType_FLOAT64:
*type = kTfLiteFloat64;
return kTfLiteOk;
case TensorType_INT16:
*type = kTfLiteInt16;
return kTfLiteOk;
case TensorType_UINT16:
*type = kTfLiteUInt16;
return kTfLiteOk;
case TensorType_INT32:
*type = kTfLiteInt32;
return kTfLiteOk;
case TensorType_UINT32:
*type = kTfLiteUInt32;
return kTfLiteOk;
case TensorType_UINT8:
*type = kTfLiteUInt8;
return kTfLiteOk;
case TensorType_INT8:
*type = kTfLiteInt8;
return kTfLiteOk;
case TensorType_INT64:
*type = kTfLiteInt64;
return kTfLiteOk;
case TensorType_UINT64:
*type = kTfLiteUInt64;
return kTfLiteOk;
case TensorType_STRING:
*type = kTfLiteString;
return kTfLiteOk;
case TensorType_BOOL:
*type = kTfLiteBool;
return kTfLiteOk;
case TensorType_COMPLEX64:
*type = kTfLiteComplex64;
return kTfLiteOk;
case TensorType_COMPLEX128:
*type = kTfLiteComplex128;
return kTfLiteOk;
case TensorType_RESOURCE:
*type = kTfLiteResource;
return kTfLiteOk;
case TensorType_VARIANT:
*type = kTfLiteVariant;
return kTfLiteOk;
case TensorType_INT4:
*type = kTfLiteInt4;
return kTfLiteOk;
default:
*type = kTfLiteNoType;
TF_LITE_REPORT_ERROR(error_reporter,
"Unsupported data type %d in tensor\n", tensor_type);
return kTfLiteError;
}
}
TfLiteStatus ParseAbs(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteAddParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteAddParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const AddOptions* schema_params = op->builtin_options_as_AddOptions();
if (schema_params != nullptr) {
params->activation =
ConvertActivation(schema_params->fused_activation_function());
params->pot_scale_int16 = schema_params->pot_scale_int16();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseAddN(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
return kTfLiteOk;
}
TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteArgMaxParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteArgMaxParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const ArgMaxOptions* schema_params = op->builtin_options_as_ArgMaxOptions();
if (schema_params != nullptr) {
TF_LITE_ENSURE_STATUS(ConvertTensorType(
schema_params->output_type(), ¶ms->output_type, error_reporter));
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteArgMinParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteArgMinParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const ArgMinOptions* schema_params = op->builtin_options_as_ArgMinOptions();
if (schema_params != nullptr) {
TF_LITE_ENSURE_STATUS(ConvertTensorType(
schema_params->output_type(), ¶ms->output_type, error_reporter));
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseAssignVariable(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseBatchMatMul(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params = safe_allocator.Allocate<TfLiteBatchMatMulParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* bmm_params = op->builtin_options_as_BatchMatMulOptions()) {
params->adj_x = bmm_params->adj_x();
params->adj_y = bmm_params->adj_y();
params->asymmetric_quantize_inputs =
bmm_params->asymmetric_quantize_inputs();
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseBatchToSpaceNd(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseBroadcastArgs(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseBroadcastTo(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseCallOnce(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteCallOnceParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteCallOnceParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const CallOnceOptions* schema_params =
op->builtin_options_as_CallOnceOptions();
if (schema_params != nullptr) {
params->init_subgraph_index = schema_params->init_subgraph_index();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseCast(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params = safe_allocator.Allocate<TfLiteCastParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params = op->builtin_options_as_CastOptions()) {
TF_LITE_ENSURE_STATUS(ConvertTensorType(
schema_params->in_data_type(), ¶ms->in_data_type, error_reporter));
TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_data_type(),
¶ms->out_data_type,
error_reporter));
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseCeil(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseConcatenation(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteConcatenationParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteConcatenationParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const ConcatenationOptions* schema_params =
op->builtin_options_as_ConcatenationOptions();
if (schema_params != nullptr) {
params->activation =
ConvertActivation(schema_params->fused_activation_function());
params->axis = schema_params->axis();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteConvParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteConvParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const Conv2DOptions* schema_params = op->builtin_options_as_Conv2DOptions();
if (schema_params != nullptr) {
params->padding = ConvertPadding(schema_params->padding());
params->stride_width = schema_params->stride_w();
params->stride_height = schema_params->stride_h();
params->activation =
ConvertActivation(schema_params->fused_activation_function());
params->dilation_width_factor = schema_params->dilation_w_factor();
params->dilation_height_factor = schema_params->dilation_h_factor();
TF_LITE_ENSURE_STATUS(
ConvertTensorType(schema_params->quantized_bias_type(),
¶ms->quantized_bias_type, error_reporter));
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseCumsum(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params = safe_allocator.Allocate<TfLiteCumsumParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* cumsum_params = op->builtin_options_as_CumsumOptions()) {
params->exclusive = cumsum_params->exclusive();
params->reverse = cumsum_params->reverse();
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseCos(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseDepthToSpace(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteDepthToSpaceParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteDepthToSpaceParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const auto* schema_params = op->builtin_options_as_DepthToSpaceOptions();
if (schema_params != nullptr) {
params->block_size = schema_params->block_size();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseDepthwiseConv2D(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteDepthwiseConvParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteDepthwiseConvParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const DepthwiseConv2DOptions* schema_params =
op->builtin_options_as_DepthwiseConv2DOptions();
if (schema_params != nullptr) {
params->padding = ConvertPadding(schema_params->padding());
params->stride_width = schema_params->stride_w();
params->stride_height = schema_params->stride_h();
params->depth_multiplier = schema_params->depth_multiplier();
params->activation =
ConvertActivation(schema_params->fused_activation_function());
params->dilation_width_factor = schema_params->dilation_w_factor();
params->dilation_height_factor = schema_params->dilation_h_factor();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseDequantize(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseDiv(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params = safe_allocator.Allocate<TfLiteDivParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params = op->builtin_options_as_DivOptions()) {
params->activation =
ConvertActivation(schema_params->fused_activation_function());
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseElu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseEmbeddingLookup(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseEqual(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseExp(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseExpandDims(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseFill(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseFloor(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseFloorDiv(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseFloorMod(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseFullyConnected(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteFullyConnectedParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteFullyConnectedParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const FullyConnectedOptions* schema_params =
op->builtin_options_as_FullyConnectedOptions();
if (schema_params != nullptr) {
params->activation =
ConvertActivation(schema_params->fused_activation_function());
params->keep_num_dims = schema_params->keep_num_dims();
params->asymmetric_quantize_inputs =
schema_params->asymmetric_quantize_inputs();
TF_LITE_ENSURE_STATUS(
ConvertTensorType(schema_params->quantized_bias_type(),
¶ms->quantized_bias_type, error_reporter));
switch (schema_params->weights_format()) {
case FullyConnectedOptionsWeightsFormat_DEFAULT:
params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
break;
case FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
params->weights_format =
kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
break;
default:
TF_LITE_REPORT_ERROR(error_reporter,
"Unhandled fully-connected weights format.");
return kTfLiteError;
}
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseGather(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params = safe_allocator.Allocate<TfLiteGatherParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
params->axis = 0;
params->batch_dims = 0;
if (const auto* gather_params = op->builtin_options_as_GatherOptions()) {
params->axis = gather_params->axis();
params->batch_dims = gather_params->batch_dims();
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseGatherNd(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseGreater(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseGreaterEqual(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseHardSwish(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseIf(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteIfParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteIfParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const IfOptions* schema_params = op->builtin_options_as_IfOptions();
if (schema_params != nullptr) {
params->then_subgraph_index = schema_params->then_subgraph_index();
params->else_subgraph_index = schema_params->else_subgraph_index();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseL2Normalization(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteL2NormParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteL2NormParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const L2NormOptions* schema_params = op->builtin_options_as_L2NormOptions();
if (schema_params != nullptr) {
params->activation =
ConvertActivation(schema_params->fused_activation_function());
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseLeakyRelu(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params = safe_allocator.Allocate<TfLiteLeakyReluParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* leaky_relu_params =
op->builtin_options_as_LeakyReluOptions()) {
params->alpha = leaky_relu_params->alpha();
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseLess(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseLessEqual(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseLog(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseLogicalAnd(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseLogicalNot(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseLogicalOr(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseLogistic(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseLogSoftmax(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseLSTM(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params = safe_allocator.Allocate<TfLiteLSTMParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* lstm_params = op->builtin_options_as_LSTMOptions()) {
params->activation =
ConvertActivation(lstm_params->fused_activation_function());
params->cell_clip = lstm_params->cell_clip();
params->proj_clip = lstm_params->proj_clip();
switch (lstm_params->kernel_type()) {
case LSTMKernelType_FULL:
params->kernel_type = kTfLiteLSTMFullKernel;
break;
case LSTMKernelType_BASIC:
params->kernel_type = kTfLiteLSTMBasicKernel;
break;
default:
TF_LITE_REPORT_ERROR(error_reporter, "Unhandled LSTM kernel type: %d",
lstm_params->kernel_type());
return kTfLiteError;
}
params->asymmetric_quantize_inputs =
lstm_params->asymmetric_quantize_inputs();
} else {
TF_LITE_REPORT_ERROR(error_reporter, "No valid LSTM builtin options exist");
return kTfLiteError;
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseMaximum(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseMinimum(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseMirrorPad(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteMirrorPaddingParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteMirrorPaddingParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const MirrorPadOptions* schema_params =
op->builtin_options_as_MirrorPadOptions();
if (schema_params != nullptr) {
params->mode = ConvertMirrorPadding(schema_params->mode());
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteMulParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteMulParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const MulOptions* schema_params = op->builtin_options_as_MulOptions();
if (schema_params != nullptr) {
params->activation =
ConvertActivation(schema_params->fused_activation_function());
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseNeg(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseNotEqual(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLitePackParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLitePackParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const PackOptions* schema_params = op->builtin_options_as_PackOptions();
if (schema_params != nullptr) {
params->values_count = schema_params->values_count();
params->axis = schema_params->axis();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParsePad(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParsePadV2(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLitePoolParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLitePoolParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const Pool2DOptions* schema_params = op->builtin_options_as_Pool2DOptions();
if (schema_params != nullptr) {
params->padding = ConvertPadding(schema_params->padding());
params->stride_width = schema_params->stride_w();
params->stride_height = schema_params->stride_h();
params->filter_width = schema_params->filter_width();
params->filter_height = schema_params->filter_height();
params->activation =
ConvertActivation(schema_params->fused_activation_function());
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParsePow(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParsePrelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseQuantize(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseReadVariable(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteReducerParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteReducerParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const ReducerOptions* schema_params = op->builtin_options_as_ReducerOptions();
if (schema_params != nullptr) {
params->keep_dims = schema_params->keep_dims();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseRelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseRelu6(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteReshapeParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteReshapeParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const ReshapeOptions* schema_params = op->builtin_options_as_ReshapeOptions();
if (schema_params != nullptr) {
const flatbuffers::Vector<int32_t>* new_shape = schema_params->new_shape();
if (new_shape != nullptr) {
TF_LITE_ENSURE_STATUS(
FlatBufferIntVectorToArray(sizeof(params->shape), new_shape,
params->shape, error_reporter, "reshape"));
params->num_dimensions = new_shape->size();
} else {
}
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseResizeBilinear(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteResizeBilinearParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteResizeBilinearParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const ResizeBilinearOptions* schema_params =
op->builtin_options_as_ResizeBilinearOptions();
if (schema_params != nullptr) {
params->align_corners = schema_params->align_corners();
params->half_pixel_centers = schema_params->half_pixel_centers();
} else {
params->align_corners = false;
params->half_pixel_centers = false;
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseResizeNearestNeighbor(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteResizeNearestNeighborParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteResizeNearestNeighborParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const ResizeNearestNeighborOptions* schema_params =
op->builtin_options_as_ResizeNearestNeighborOptions();
if (schema_params != nullptr) {
params->align_corners = schema_params->align_corners();
params->half_pixel_centers = schema_params->half_pixel_centers();
} else {
params->align_corners = false;
params->half_pixel_centers = false;
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseStablehloReduceWindow(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteStablehloReduceWindowParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
const StablehloReduceWindowOptions* schema_params =
op->builtin_options_2_as_StablehloReduceWindowOptions();
if (schema_params) {
if (!schema_params->window_dimensions() ||
schema_params->window_dimensions()->size() == 0) {
TF_LITE_REPORT_ERROR(error_reporter,
"'window_dimensions' attribute is not optional for "
"'stablehlo.reduce_window' and cannot be empty.");
return kTfLiteError;
}
const size_t rank = schema_params->window_dimensions()->size();
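    // Copies an optional attribute vector into a fixed-size parameter array,
    // checking its size and falling back to `fill_value` when it is absent.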
auto LoadAttr = [&error_reporter](
int64_t* params_array, size_t params_array_size_bytes,
const flatbuffers::Vector<int64_t>* flatbuffer_vector,
const char* attr_name, const size_t expected_size,
const int64_t fill_value) -> TfLiteStatus {
if (flatbuffer_vector && flatbuffer_vector->size()) {
if (expected_size != 0 && flatbuffer_vector->size() != expected_size) {
TF_LITE_REPORT_ERROR(
error_reporter,
"'%s' attribute of 'stablehlo.reduce_window' does not have the "
"expected size (%llu != %llu).",
attr_name, flatbuffer_vector->size(), expected_size);
return kTfLiteError;
}
TfLiteStatus status = FlatBufferIntVectorToArray(
params_array_size_bytes, flatbuffer_vector, params_array,
error_reporter, "stablehlo.reduce_window");
if (status != kTfLiteOk) {
TF_LITE_REPORT_ERROR(error_reporter, "Check the '%s' attribute.",
attr_name);
return status;
}
} else {
std::fill_n(params_array, params_array_size_bytes / sizeof(int64_t),
fill_value);
}
return kTfLiteOk;
};
    TF_LITE_ENSURE_STATUS(
        LoadAttr(params->window_dimensions, sizeof(params->window_dimensions),
                 schema_params->window_dimensions(), "window_dimensions",
                 /*expected_size=*/rank, /*fill_value=*/1));
    TF_LITE_ENSURE_STATUS(
        LoadAttr(params->window_strides, sizeof(params->window_strides),
                 schema_params->window_strides(), "window_strides",
                 /*expected_size=*/rank, /*fill_value=*/1));
    TF_LITE_ENSURE_STATUS(
        LoadAttr(params->base_dilations, sizeof(params->base_dilations),
                 schema_params->base_dilations(), "base_dilations",
                 /*expected_size=*/rank, /*fill_value=*/1));
    TF_LITE_ENSURE_STATUS(
        LoadAttr(params->window_dilations, sizeof(params->window_dilations),
                 schema_params->window_dilations(), "window_dilations",
                 /*expected_size=*/rank, /*fill_value=*/1));
    TF_LITE_ENSURE_STATUS(LoadAttr(params->padding, sizeof(params->padding),
                                   schema_params->padding(), "padding",
                                   /*expected_size=*/2 * rank,
                                   /*fill_value=*/0));
params->body_subgraph_index = schema_params->body_subgraph_index();
*builtin_data = params.release();
return kTfLiteOk;
}
TF_LITE_REPORT_ERROR(
error_reporter,
"Could not get 'stablehlo.reduce_window' operation parameters.");
return kTfLiteError;
}
TfLiteStatus ParseStablehloScatter(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteStablehloScatterParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteStablehloScatterParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const StablehloScatterOptions* schema_params =
op->builtin_options_2_as_StablehloScatterOptions();
if (schema_params) {
params->indices_are_sorted = schema_params->indices_are_sorted();
if (schema_params->update_window_dims()) {
TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray<int64_t>(
schema_params->update_window_dims()->size() * sizeof(int64_t),
schema_params->update_window_dims(), params->update_window_dims,
error_reporter, "stablehlo_scatter"));
params->num_update_window_dims =
schema_params->update_window_dims()->size();
}
if (schema_params->inserted_window_dims()) {
TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray<int64_t>(
schema_params->inserted_window_dims()->size() * sizeof(int64_t),
schema_params->inserted_window_dims(), params->inserted_window_dims,
error_reporter, "stablehlo_scatter"));
params->num_inserted_window_dims =
schema_params->inserted_window_dims()->size();
}
if (schema_params->scatter_dims_to_operand_dims()) {
TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray<int64_t>(
schema_params->scatter_dims_to_operand_dims()->size() *
sizeof(int64_t),
schema_params->scatter_dims_to_operand_dims(),
params->scatter_dims_to_operand_dims, error_reporter,
"stablehlo_scatter"));
params->num_scatter_dims_to_operand_dims =
schema_params->scatter_dims_to_operand_dims()->size();
}
params->index_vector_dim = schema_params->index_vector_dim();
params->unique_indices = schema_params->unique_indices();
params->update_computation_subgraph_index =
schema_params->update_computation_subgraph_index();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseStablehloRngBitGenerator(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteStablehloRngBitGeneratorParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteStablehloRngBitGeneratorParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const StablehloRngBitGeneratorOptions* schema_params =
op->builtin_options_2_as_StablehloRngBitGeneratorOptions();
if (schema_params != nullptr) {
params->algorithm = ConvertRngAlgorithm(schema_params->algorithm());
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseStablehloGather(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteStablehloGatherParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteStablehloGatherParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const StablehloGatherOptions* schema_params =
op->builtin_options_2_as_StablehloGatherOptions();
if (schema_params != nullptr) {
TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray<int64_t>(
        schema_params->offset_dims()->size() * sizeof(int64_t),
        schema_params->offset_dims(), params->offset_dims, error_reporter,
        "stablehlo_gather"));
params->num_offset_dims = schema_params->offset_dims()->size();
TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray<int64_t>(
schema_params->collapsed_slice_dims()->size() * sizeof(int64_t),
schema_params->collapsed_slice_dims(), params->collapsed_slice_dims,
error_reporter, "stablehlo_gather"));
params->num_collapsed_slice_dims =
schema_params->collapsed_slice_dims()->size();
TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray<int64_t>(
schema_params->start_index_map()->size() * sizeof(int64_t),
schema_params->start_index_map(), params->start_index_map,
error_reporter, "stablehlo_gather"));
params->num_start_index_map = schema_params->start_index_map()->size();
params->index_vector_dim = schema_params->index_vector_dim();
TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray<int64_t>(
schema_params->slice_sizes()->size() * sizeof(int64_t),
schema_params->slice_sizes(), params->slice_sizes, error_reporter,
"stablehlo_gather"));
params->num_slice_sizes = schema_params->slice_sizes()->size();
params->indices_are_sorted = schema_params->indices_are_sorted();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseStablehloPad(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteStablehloPadParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
const StablehloPadOptions* schema_params =
op->builtin_options_2_as_StablehloPadOptions();
if (schema_params) {
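    // Copies one padding attribute array into the params struct and reports
    // the offending attribute name on failure.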
auto LoadAttr =
[&error_reporter](
int64_t* params_array, const size_t params_array_size_bytes,
const flatbuffers::Vector<int64_t>* const flatbuffer_vector,
const char* const attr_name) -> TfLiteStatus {
TfLiteStatus status = FlatBufferIntVectorToArray(
params_array_size_bytes, flatbuffer_vector, params_array,
error_reporter, "stablehlo.pad");
if (status != kTfLiteOk) {
TF_LITE_REPORT_ERROR(error_reporter, "Check the '%s' attribute.",
attr_name);
}
return status;
};
TF_LITE_ENSURE_STATUS(
LoadAttr(params->edge_padding_low, sizeof(params->edge_padding_low),
schema_params->edge_padding_low(), "edge_padding_low"));
TF_LITE_ENSURE_STATUS(
LoadAttr(params->edge_padding_high, sizeof(params->edge_padding_high),
schema_params->edge_padding_high(), "edge_padding_high"));
TF_LITE_ENSURE_STATUS(
LoadAttr(params->interior_padding, sizeof(params->interior_padding),
schema_params->interior_padding(), "interior_padding"));
if (schema_params->edge_padding_low()->size() !=
schema_params->edge_padding_high()->size() ||
schema_params->edge_padding_low()->size() !=
schema_params->interior_padding()->size()) {
TF_LITE_REPORT_ERROR(error_reporter,
"'stablehlo.pad' operation parameter array sizes "
"are not consistent.");
return kTfLiteError;
}
*builtin_data = params.release();
return kTfLiteOk;
}
TF_LITE_REPORT_ERROR(error_reporter,
"Could not get 'stablehlo.pad' operation parameters.");
return kTfLiteError;
}
TfLiteStatus ParseStablehloComposite(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params = safe_allocator.Allocate<TfLiteStablehloCompositeParams>();
const StableHLOCompositeOptions* schema_params =
op->builtin_options_2_as_StableHLOCompositeOptions();
if (schema_params) {
params->name = schema_params->name()->c_str();
params->version = schema_params->version();
params->subgraph_index = schema_params->decomposition_subgraph_index();
params->attributes = schema_params->composite_attributes()->data();
params->attributes_size = schema_params->composite_attributes()->size();
*builtin_data = params.release();
return kTfLiteOk;
}
TF_LITE_REPORT_ERROR(
error_reporter,
"Could not get 'stablehlo.composite' operation parameters.");
return kTfLiteError;
}
TfLiteStatus ParseStablehloShiftLeft(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
return kTfLiteOk;
}
TfLiteStatus ParseRound(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseRsqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseSelectV2(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteShapeParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteShapeParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const ShapeOptions* schema_params = op->builtin_options_as_ShapeOptions();
if (schema_params != nullptr) {
TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_type(),
¶ms->out_type, error_reporter));
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseSin(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseSlice(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteSoftmaxParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteSoftmaxParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const SoftmaxOptions* schema_params = op->builtin_options_as_SoftmaxOptions();
if (schema_params != nullptr) {
params->beta = schema_params->beta();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseSpaceToBatchNd(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseSpaceToDepth(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteSpaceToDepthParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteSpaceToDepthParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const auto* schema_params = op->builtin_options_as_SpaceToDepthOptions();
if (schema_params != nullptr) {
params->block_size = schema_params->block_size();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteSplitParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteSplitParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const SplitOptions* schema_params = op->builtin_options_as_SplitOptions();
if (schema_params != nullptr) {
params->num_splits = schema_params->num_splits();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteSplitVParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteSplitVParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const SplitVOptions* schema_params = op->builtin_options_as_SplitVOptions();
if (schema_params != nullptr) {
params->num_splits = schema_params->num_splits();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseUnidirectionalSequenceLSTM(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params =
safe_allocator.Allocate<TfLiteUnidirectionalSequenceLSTMParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* seq_lstm_params =
op->builtin_options_as_UnidirectionalSequenceLSTMOptions()) {
params->activation =
ConvertActivation(seq_lstm_params->fused_activation_function());
params->cell_clip = seq_lstm_params->cell_clip();
params->proj_clip = seq_lstm_params->proj_clip();
params->time_major = seq_lstm_params->time_major();
params->asymmetric_quantize_inputs =
seq_lstm_params->asymmetric_quantize_inputs();
params->diagonal_recurrent_tensors =
seq_lstm_params->diagonal_recurrent_tensors();
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseSqueeze(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteSqueezeParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteSqueezeParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const SqueezeOptions* schema_params = op->builtin_options_as_SqueezeOptions();
if (schema_params != nullptr) {
const auto* squeeze_dims = schema_params->squeeze_dims();
if (squeeze_dims != nullptr) {
TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(
sizeof(params->squeeze_dims), squeeze_dims, params->squeeze_dims,
error_reporter, "squeeze"));
params->num_squeeze_dims = squeeze_dims->size();
} else {
params->num_squeeze_dims = 0;
}
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseSqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseSquare(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseSquaredDifference(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseStridedSlice(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteStridedSliceParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteStridedSliceParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const StridedSliceOptions* schema_params =
op->builtin_options_as_StridedSliceOptions();
if (schema_params != nullptr) {
params->begin_mask = schema_params->begin_mask();
params->end_mask = schema_params->end_mask();
params->ellipsis_mask = schema_params->ellipsis_mask();
params->new_axis_mask = schema_params->new_axis_mask();
params->shrink_axis_mask = schema_params->shrink_axis_mask();
params->offset = schema_params->offset();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteSubParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteSubParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const SubOptions* schema_params = op->builtin_options_as_SubOptions();
if (schema_params != nullptr) {
params->activation =
ConvertActivation(schema_params->fused_activation_function());
params->pot_scale_int16 = schema_params->pot_scale_int16();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteSVDFParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteSVDFParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const SVDFOptions* schema_params = op->builtin_options_as_SVDFOptions();
if (schema_params != nullptr) {
params->rank = schema_params->rank();
params->activation =
ConvertActivation(schema_params->fused_activation_function());
params->asymmetric_quantize_inputs =
schema_params->asymmetric_quantize_inputs();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseTanh(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
TfLiteStatus ParseTranspose(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseTransposeConv(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteTransposeConvParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteTransposeConvParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const TransposeConvOptions* transpose_conv_params =
op->builtin_options_as_TransposeConvOptions();
if (transpose_conv_params != nullptr) {
params->padding = ConvertPadding(transpose_conv_params->padding());
params->stride_width = transpose_conv_params->stride_w();
params->stride_height = transpose_conv_params->stride_h();
params->activation =
ConvertActivation(transpose_conv_params->fused_activation_function());
TF_LITE_ENSURE_STATUS(
ConvertTensorType(transpose_conv_params->quantized_bias_type(),
¶ms->quantized_bias_type, error_reporter));
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteUnpackParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteUnpackParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const UnpackOptions* schema_params = op->builtin_options_as_UnpackOptions();
if (schema_params != nullptr) {
params->num = schema_params->num();
params->axis = schema_params->axis();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseVarHandle(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteVarHandleParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteVarHandleParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const VarHandleOptions* schema_params =
op->builtin_options_as_VarHandleOptions();
if (schema_params != nullptr) {
if (schema_params->container()) {
params->container = schema_params->container()->c_str();
}
if (schema_params->shared_name()) {
params->shared_name = schema_params->shared_name()->c_str();
}
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseWhile(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteWhileParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteWhileParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const WhileOptions* schema_params = op->builtin_options_as_WhileOptions();
if (schema_params != nullptr) {
params->cond_subgraph_index = schema_params->cond_subgraph_index();
params->body_subgraph_index = schema_params->body_subgraph_index();
} else {
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseZerosLike(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseBitwiseXor(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseRightShift(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
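// On TF Lite Micro builds (TF_LITE_STATIC_MEMORY) the monolithic parser is
// compiled out, so callers must use the per-operator Parse* functions instead.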
TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
#ifdef TF_LITE_STATIC_MEMORY
TF_LITE_REPORT_ERROR(
error_reporter,
"ParseOpData is unsupported on TfLiteMicro, please use the operator "
"specific parse functions (e.g. ParseAdd etc.).\n");
return kTfLiteError;
#else
return ParseOpDataTfLite(op, op_type, error_reporter, allocator,
builtin_data);
#endif
}
}  // namespace tflite
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
#include <cstdarg>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <tuple>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/compiler/mlir/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
using testing::AllOf;
using testing::Each;
using testing::ElementsAre;
using testing::Eq;
using testing::HasSubstr;
using testing::StrEq;
namespace tflite {
namespace {
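// Error reporter that accumulates reported messages in an in-memory buffer so
// tests can inspect what was reported.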
class MockErrorReporter : public ErrorReporter {
public:
MockErrorReporter() : buffer_size_(0) {}
int Report(const char* format, va_list args) override {
buffer_size_ += vsnprintf(buffer_ + buffer_size_,
kBufferSize - buffer_size_, format, args);
return buffer_size_;
}
const char* GetBuffer() const { return buffer_; }
int GetBufferSize() const { return buffer_size_; }
bool IsEmpty() const { return !buffer_size_; }
string GetString() const { return string(buffer_, buffer_size_); }
private:
static constexpr int kBufferSize = 256;
char buffer_[kBufferSize];
int buffer_size_;
};
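// Allocator backed by a single fixed-size buffer; it expects at most one live
// allocation at a time and checks requested sizes against the buffer limit.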
class MockDataAllocator : public BuiltinDataAllocator {
public:
MockDataAllocator() : is_allocated_(false) {}
void* Allocate(size_t size, size_t alignment_hint) override {
EXPECT_FALSE(is_allocated_);
const int max_size = kBufferSize;
EXPECT_LE(size, max_size);
is_allocated_ = true;
return buffer_;
}
void Deallocate(void* data) override { is_allocated_ = false; }
private:
static constexpr int kBufferSize = 1024;
char buffer_[kBufferSize];
bool is_allocated_;
};
}  // namespace
class FlatbufferConversionsTest : public ::testing::Test {
public:
const Operator* BuildTestOperator(BuiltinOptions op_type,
flatbuffers::Offset<void> options) {
flatbuffers::Offset<Operator> offset =
CreateOperatorDirect(builder_, 0, nullptr, nullptr, op_type, options,
nullptr, CustomOptionsFormat_FLEXBUFFERS, nullptr);
builder_.Finish(offset);
void* pointer = builder_.GetBufferPointer();
return flatbuffers::GetRoot<Operator>(pointer);
}
const Operator* BuildTestOperator(BuiltinOptions2 op_type,
flatbuffers::Offset<void> options) {
flatbuffers::Offset<Operator> offset = CreateOperatorDirect(
        builder_, /*opcode_index=*/0, /*inputs=*/nullptr, /*outputs=*/nullptr,
        /*builtin_options_type=*/tflite::BuiltinOptions_NONE,
        /*builtin_options=*/0, /*custom_options=*/nullptr,
        /*custom_options_format=*/tflite::CustomOptionsFormat_FLEXBUFFERS,
        /*mutating_variable_indices=*/nullptr, /*intermediates=*/nullptr,
        /*large_custom_options_offset=*/0, /*large_custom_options_size=*/0,
        /*builtin_options_2_type=*/op_type,
        /*builtin_options_2=*/options);
builder_.Finish(offset);
void* pointer = builder_.GetBufferPointer();
return flatbuffers::GetRoot<Operator>(pointer);
}
protected:
MockErrorReporter mock_reporter_;
MockDataAllocator mock_allocator_;
flatbuffers::FlatBufferBuilder builder_;
};
TEST_F(FlatbufferConversionsTest, ParseSqueezeAll) {
const Operator* op = BuildTestOperator(
BuiltinOptions_SqueezeOptions, CreateSqueezeOptions(builder_).Union());
void* output_data = nullptr;
EXPECT_EQ(kTfLiteOk, ParseOpData(op, BuiltinOperator_SQUEEZE, &mock_reporter_,
&mock_allocator_, &output_data));
}
TEST_F(FlatbufferConversionsTest, ParseDynamicReshape) {
const Operator* op = BuildTestOperator(
BuiltinOptions_ReshapeOptions, CreateReshapeOptions(builder_).Union());
void* output_data = nullptr;
EXPECT_EQ(kTfLiteOk, ParseOpData(op, BuiltinOperator_RESHAPE, &mock_reporter_,
&mock_allocator_, &output_data));
}
TEST_F(FlatbufferConversionsTest, TestParseOpDataConv) {
const Operator* conv_op =
BuildTestOperator(BuiltinOptions_Conv2DOptions,
CreateConv2DOptions(builder_, Padding_SAME, 1, 2,
ActivationFunctionType_RELU, 3, 4)
.Union());
void* output_data = nullptr;
EXPECT_EQ(kTfLiteOk,
ParseOpData(conv_op, BuiltinOperator_CONV_2D, &mock_reporter_,
&mock_allocator_, &output_data));
EXPECT_NE(nullptr, output_data);
TfLiteConvParams* params = reinterpret_cast<TfLiteConvParams*>(output_data);
EXPECT_EQ(kTfLitePaddingSame, params->padding);
EXPECT_EQ(1, params->stride_width);
EXPECT_EQ(2, params->stride_height);
EXPECT_EQ(kTfLiteActRelu, params->activation);
EXPECT_EQ(3, params->dilation_width_factor);
EXPECT_EQ(4, params->dilation_height_factor);
}
TEST_F(FlatbufferConversionsTest, ParseBadFullyConnected) {
const Operator* conv_op = BuildTestOperator(
BuiltinOptions_FullyConnectedOptions,
CreateFullyConnectedOptions(
builder_, ActivationFunctionType_RELU,
static_cast<FullyConnectedOptionsWeightsFormat>(-1), true)
.Union());
void* output_data = nullptr;
EXPECT_EQ(kTfLiteError,
ParseOpData(conv_op, BuiltinOperator_FULLY_CONNECTED,
&mock_reporter_, &mock_allocator_, &output_data));
}
TEST_F(FlatbufferConversionsTest, TestParseOpDataCustom) {
const Operator* custom_op =
BuildTestOperator(BuiltinOptions_NONE, flatbuffers::Offset<void>());
void* output_data = nullptr;
EXPECT_EQ(kTfLiteOk,
ParseOpData(custom_op, BuiltinOperator_CUSTOM, &mock_reporter_,
&mock_allocator_, &output_data));
EXPECT_EQ(nullptr, output_data);
}
TEST_F(FlatbufferConversionsTest, TestConvertTensorType) {
TfLiteType type;
EXPECT_EQ(kTfLiteOk,
ConvertTensorType(TensorType_FLOAT32, &type, &mock_reporter_));
EXPECT_EQ(kTfLiteFloat32, type);
}
TEST_F(FlatbufferConversionsTest, TestConvertTensorTypeFloat16) {
TfLiteType type;
EXPECT_EQ(kTfLiteOk,
ConvertTensorType(TensorType_FLOAT16, &type, &mock_reporter_));
EXPECT_EQ(kTfLiteFloat16, type);
}
TEST_F(FlatbufferConversionsTest, TestConvertTensorTypeBFloat16) {
TfLiteType type;
EXPECT_EQ(kTfLiteOk,
ConvertTensorType(TensorType_BFLOAT16, &type, &mock_reporter_));
EXPECT_EQ(kTfLiteBFloat16, type);
}
TEST_F(FlatbufferConversionsTest, TestConvertTensorTypeInt4) {
TfLiteType type;
EXPECT_EQ(kTfLiteOk,
ConvertTensorType(TensorType_INT4, &type, &mock_reporter_));
EXPECT_EQ(kTfLiteInt4, type);
}
class StablehloReduceWindowFlatbufferConversionsTest
: public FlatbufferConversionsTest {
public:
static constexpr int kMaxDims =
TFLITE_STABLEHLO_REDUCE_WINDOW_PARAMS_MAX_DIMENSION_COUNT;
static constexpr int64_t kValidValue = 5;
auto ValidAttr() {
return builder_.CreateVector(std::vector<int64_t>(kMaxDims, kValidValue));
}
auto InvalidAttr() {
return builder_.CreateVector(
std::vector<int64_t>(kMaxDims + 1, kValidValue));
}
auto ValidPaddingAttr() {
return builder_.CreateVector(
std::vector<int64_t>(2 * kMaxDims, kValidValue));
}
auto InvalidPaddingAttr() {
return builder_.CreateVector(
std::vector<int64_t>(2 * kMaxDims + 1, kValidValue));
}
auto EmptyAttr() { return builder_.CreateVector<int64_t>({}); }
};
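// Helper sizes used by the reduce_window tests: ValidAttr() holds exactly
// kMaxDims elements, InvalidAttr() holds kMaxDims + 1, ValidPaddingAttr()
// holds 2 * kMaxDims (a low/high pair per dimension), InvalidPaddingAttr()
// holds 2 * kMaxDims + 1, and EmptyAttr() holds no elements at all.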
TEST_F(StablehloReduceWindowFlatbufferConversionsTest, Succeeds) {
const Operator* stablehlo_reduce_window_op = BuildTestOperator(
BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
builder_.CreateVector<int64_t>({1, 2}),
builder_.CreateVector<int64_t>({3, 4}),
builder_.CreateVector<int64_t>({5, 6}),
builder_.CreateVector<int64_t>({7, 8}),
builder_.CreateVector<int64_t>({9, 10, 11, 12}),
13)
.Union());
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(
ParseOpData(stablehlo_reduce_window_op,
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, 2),
ElementsAre(1, 2));
EXPECT_THAT(std::make_tuple(output_data->window_strides, 2),
ElementsAre(3, 4));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, 2),
ElementsAre(5, 6));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, 2),
ElementsAre(7, 8));
EXPECT_THAT(std::make_tuple(output_data->padding, 4),
ElementsAre(9, 10, 11, 12));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
FailsWithNoWindowDimensions) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
0,
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(mock_reporter_.GetString(),
HasSubstr("'window_dimensions' attribute is not optional for "
"'stablehlo.reduce_window' and cannot be empty."));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithNoWindowStrides) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
0,
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims), Each(1));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),
Each(kValidValue));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithNoBaseDilations) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
0,
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims), Each(1));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),
Each(kValidValue));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithNoWindowDilations) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
ValidAttr(),
0,
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(1));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),
Each(kValidValue));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest, SucceedsWithNoPadding) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidAttr(),
0,
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims), Each(0));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
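// Taken together, the "SucceedsWithNo..." tests above document the parser's
// defaulting behavior: when window_strides, base_dilations or
// window_dilations are omitted they are filled with 1, an omitted padding
// attribute is filled with 0, and window_dimensions remains mandatory.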
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
FailsWithEmptyWindowDimensions) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
EmptyAttr(),
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(mock_reporter_.GetString(),
HasSubstr("'window_dimensions' attribute is not optional for "
"'stablehlo.reduce_window' and cannot be empty."));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithEmptyWindowStrides) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
EmptyAttr(),
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims), Each(1));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),
Each(kValidValue));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithEmptyBaseDilations) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
EmptyAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims), Each(1));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),
Each(kValidValue));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithEmptyWindowDilations) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
ValidAttr(),
EmptyAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(1));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),
Each(kValidValue));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithEmptyPadding) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidAttr(),
EmptyAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),
Each(kValidValue));
EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims), Each(0));
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
SucceedsWithParamsAtMaxDims) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(mock_reporter_.GetString(), StrEq(""));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
FailsWhenWindowDimensionsHasMoreThanMaxDims) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
InvalidAttr(),
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(mock_reporter_.GetString(),
AllOf(HasSubstr("Found too many dimensions in the input array of "
"operation 'stablehlo.reduce_window'."),
HasSubstr("Check the 'window_dimensions' attribute.")));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
FailsWhenWindowStridesHasWrongDimCount) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
InvalidAttr(),
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(
mock_reporter_.GetString(),
HasSubstr("'window_strides' attribute of 'stablehlo.reduce_window' does "
"not have the expected size"));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
FailsWhenBaseDilationsHasWrongDimCount) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
InvalidAttr(),
ValidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(
mock_reporter_.GetString(),
HasSubstr("'base_dilations' attribute of 'stablehlo.reduce_window' does "
"not have the expected size"));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
FailsWhenWindowDilationsHasWrongDimCount) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
ValidAttr(),
InvalidAttr(),
ValidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(
mock_reporter_.GetString(),
HasSubstr(
"'window_dilations' attribute of 'stablehlo.reduce_window' does "
"not have the expected size"));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest,
FailsWhenPaddingHasWrongDimCount) {
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(ParseOpData(
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_,
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidAttr(),
InvalidPaddingAttr(),
13)
.Union()),
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(mock_reporter_.GetString(),
HasSubstr("'padding' attribute of 'stablehlo.reduce_window' does "
"not have the expected size"));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest, FailsWithWrongOptions) {
const Operator* stablehlo_reduce_window_op =
BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions, 0);
TfLiteStablehloReduceWindowParams* output_data = nullptr;
EXPECT_EQ(
ParseOpData(stablehlo_reduce_window_op,
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(
mock_reporter_.GetString(),
HasSubstr(
"Could not get 'stablehlo.reduce_window' operation parameters."));
}
TEST_F(StablehloReduceWindowFlatbufferConversionsTest, DeathTests) {
const Operator* stablehlo_reduce_window_op = BuildTestOperator(
BuiltinOptions2_StablehloReduceWindowOptions,
CreateStablehloReduceWindowOptions(
builder_, ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidAttr(),
ValidPaddingAttr(), 13)
.Union());
TfLiteStablehloReduceWindowParams* output_data = nullptr;
#ifdef NDEBUG
GTEST_SKIP();
#endif
EXPECT_DEATH(
ParseOpData(nullptr, BuiltinOperator_STABLEHLO_REDUCE_WINDOW,
&mock_reporter_, &mock_allocator_, (void**)&output_data),
"");
EXPECT_DEATH(ParseOpData(stablehlo_reduce_window_op,
BuiltinOperator_STABLEHLO_REDUCE_WINDOW, nullptr,
&mock_allocator_, (void**)&output_data),
"");
EXPECT_DEATH(ParseOpData(stablehlo_reduce_window_op,
BuiltinOperator_STABLEHLO_REDUCE_WINDOW,
&mock_reporter_, nullptr, (void**)&output_data),
"");
EXPECT_DEATH(ParseOpData(stablehlo_reduce_window_op,
BuiltinOperator_STABLEHLO_REDUCE_WINDOW,
&mock_reporter_, &mock_allocator_, nullptr),
"");
}
class StablehloPadFlatbufferConversionsTest : public FlatbufferConversionsTest {
public:
static constexpr int kMaxDims =
TFLITE_STABLEHLO_PAD_PARAMS_MAX_DIMENSION_COUNT;
static constexpr int64_t kValidValue = 5;
};
TEST_F(StablehloPadFlatbufferConversionsTest, Succeeds) {
const Operator* stablehlo_pad_op = BuildTestOperator(
BuiltinOptions2_StablehloPadOptions,
CreateStablehloPadOptions(
builder_,
builder_.CreateVector<int64_t>({1, 0, -1}),
builder_.CreateVector<int64_t>({2, 0, -2}),
builder_.CreateVector<int64_t>({3, 0, 3}))
.Union());
TfLiteStablehloPadParams* output_data = nullptr;
EXPECT_EQ(
ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,
&mock_reporter_, &mock_allocator_, (void**)&output_data),
kTfLiteOk);
EXPECT_THAT(std::make_tuple(output_data->edge_padding_low, 3),
ElementsAre(1, 0, -1));
EXPECT_THAT(std::make_tuple(output_data->edge_padding_high, 3),
ElementsAre(2, 0, -2));
EXPECT_THAT(std::make_tuple(output_data->interior_padding, 3),
ElementsAre(3, 0, 3));
}
TEST_F(StablehloPadFlatbufferConversionsTest, FailsWithMissingLowPadding) {
const Operator* stablehlo_pad_op = BuildTestOperator(
BuiltinOptions2_StablehloPadOptions,
CreateStablehloPadOptions(
builder_,
0,
builder_.CreateVector<int64_t>({2, 0, -2}),
builder_.CreateVector<int64_t>({3, 0, 3}))
.Union());
TfLiteStablehloPadParams* output_data = nullptr;
EXPECT_EQ(
ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,
&mock_reporter_, &mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(
mock_reporter_.GetString(),
AllOf(
HasSubstr("Input array not provided for operation 'stablehlo.pad'."),
HasSubstr("Check the 'edge_padding_low' attribute.")));
}
TEST_F(StablehloPadFlatbufferConversionsTest, FailsWithMissingHighPadding) {
const Operator* stablehlo_pad_op = BuildTestOperator(
BuiltinOptions2_StablehloPadOptions,
CreateStablehloPadOptions(
builder_,
builder_.CreateVector<int64_t>({1, 0, -1}),
0,
builder_.CreateVector<int64_t>({3, 0, 3}))
.Union());
TfLiteStablehloPadParams* output_data = nullptr;
EXPECT_EQ(
ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,
&mock_reporter_, &mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(
mock_reporter_.GetString(),
AllOf(
HasSubstr("Input array not provided for operation 'stablehlo.pad'."),
HasSubstr("Check the 'edge_padding_high' attribute.")));
}
TEST_F(StablehloPadFlatbufferConversionsTest, FailsWithMissingInteriorPadding) {
const Operator* stablehlo_pad_op = BuildTestOperator(
BuiltinOptions2_StablehloPadOptions,
CreateStablehloPadOptions(
builder_,
builder_.CreateVector<int64_t>({1, 0, -1}),
builder_.CreateVector<int64_t>({2, 0, -2}),
0)
.Union());
TfLiteStablehloPadParams* output_data = nullptr;
EXPECT_EQ(
ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,
&mock_reporter_, &mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(
mock_reporter_.GetString(),
AllOf(
HasSubstr("Input array not provided for operation 'stablehlo.pad'."),
HasSubstr("Check the 'interior_padding' attribute.")));
}
TEST_F(StablehloPadFlatbufferConversionsTest, FailsInconsistentSizes) {
const Operator* stablehlo_pad_op = BuildTestOperator(
BuiltinOptions2_StablehloPadOptions,
CreateStablehloPadOptions(
builder_,
builder_.CreateVector<int64_t>({1, 0, -1}),
builder_.CreateVector<int64_t>({2, 0, -2}),
builder_.CreateVector<int64_t>({3, 0, -3, 5}))
.Union());
TfLiteStablehloPadParams* output_data = nullptr;
EXPECT_EQ(
ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,
&mock_reporter_, &mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(mock_reporter_.GetString(),
HasSubstr("'stablehlo.pad' operation parameter array sizes are "
"not consistent."));
}
TEST_F(StablehloPadFlatbufferConversionsTest, FailsWithWrongOptions) {
const Operator* stablehlo_pad_op = BuildTestOperator(BuiltinOptions_NONE, 0);
TfLiteStablehloPadParams* output_data = nullptr;
EXPECT_EQ(
ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,
&mock_reporter_, &mock_allocator_, (void**)&output_data),
kTfLiteError);
EXPECT_THAT(mock_reporter_.GetString(),
HasSubstr("Could not get 'stablehlo.pad' operation parameters."));
}
TEST_F(StablehloPadFlatbufferConversionsTest, DeathTests) {
const Operator* stablehlo_pad_op = BuildTestOperator(BuiltinOptions_NONE, 0);
TfLiteStablehloPadParams* output_data = nullptr;
#ifdef NDEBUG
GTEST_SKIP();
#endif
EXPECT_DEATH(
ParseOpData(nullptr, BuiltinOperator_STABLEHLO_PAD, &mock_reporter_,
&mock_allocator_, (void**)&output_data),
"");
EXPECT_DEATH(ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,
nullptr, &mock_allocator_, (void**)&output_data),
"");
EXPECT_DEATH(ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,
&mock_reporter_, nullptr, (void**)&output_data),
"");
EXPECT_DEATH(ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,
&mock_reporter_, &mock_allocator_, nullptr),
"");
}
}
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/api/flatbuffer_conversions.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/api/flatbuffer_conversions_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
71d031db-472c-419f-8962-e94574569eb3 | cpp | tensorflow/tensorflow | incremental_barrier | tensorflow/core/util/incremental_barrier.cc | tensorflow/core/util/incremental_barrier_test.cc
#include "tensorflow/core/util/incremental_barrier.h"
#include <atomic>
#include <functional>
#include <utility>
#include "absl/functional/bind_front.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
class InternalIncrementalBarrier {
public:
explicit InternalIncrementalBarrier(IncrementalBarrier::DoneCallback callback)
: left_(1), done_callback_(std::move(callback)) {}
void operator()() {
DCHECK_GE(left_.load(std::memory_order_relaxed), 0);
if (left_.fetch_sub(1, std::memory_order_acq_rel) - 1 == 0) {
IncrementalBarrier::DoneCallback done_callback =
std::move(done_callback_);
delete this;
done_callback();
}
}
IncrementalBarrier::BarrierCallback Inc() {
left_.fetch_add(1, std::memory_order_acq_rel);
return absl::bind_front(&InternalIncrementalBarrier::operator(), this);
}
private:
std::atomic<int> left_;
IncrementalBarrier::DoneCallback done_callback_;
};
IncrementalBarrier::IncrementalBarrier(DoneCallback done_callback)
: internal_barrier_(
new InternalIncrementalBarrier(std::move(done_callback))) {}
IncrementalBarrier::~IncrementalBarrier() { (*internal_barrier_)(); }
IncrementalBarrier::BarrierCallback IncrementalBarrier::Inc() {
return internal_barrier_->Inc();
}
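// A minimal usage sketch (illustrative only; `done_callback`, `tasks` and
// `pool` below are assumed names, not part of this file):
//
//   {
//     IncrementalBarrier barrier(done_callback);
//     for (auto& task : tasks) {
//       pool->Schedule([&task, cb = barrier.Inc()]() {
//         task.Run();
//         cb();  // Each callback returned by Inc() must run exactly once.
//       });
//     }
//   }  // done_callback fires after the barrier is destroyed and every
//      // callback returned by Inc() has been invoked.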
}
#include "tensorflow/core/util/incremental_barrier.h"
#include <atomic>
#include "absl/functional/bind_front.h"
#include "absl/time/time.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/platform.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/threadpool.h"
namespace tensorflow {
namespace {
class Counter {
public:
void Increment() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
++count_;
}
int GetCount() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
return count_;
}
private:
mutex mu_;
int count_ = 0;
};
TEST(IncrementalBarrierTest, RunInstantlyWhenZeroClosure) {
Counter counter;
EXPECT_EQ(counter.GetCount(), 0);
{
IncrementalBarrier::DoneCallback done_callback =
absl::bind_front(&Counter::Increment, &counter);
IncrementalBarrier barrier(done_callback);
EXPECT_EQ(counter.GetCount(), 0);
}
EXPECT_EQ(counter.GetCount(), 1);
}
TEST(IncrementalBarrierTest, RunAfterNumClosuresOneNowTwoLater) {
Counter counter;
IncrementalBarrier::BarrierCallback bc1, bc2;
{
IncrementalBarrier::DoneCallback done_callback =
absl::bind_front(&Counter::Increment, &counter);
IncrementalBarrier barrier(done_callback);
CHECK_EQ(counter.GetCount(), 0);
bc1 = barrier.Inc();
bc2 = barrier.Inc();
IncrementalBarrier::BarrierCallback bc3 = barrier.Inc();
bc3();
CHECK_EQ(counter.GetCount(), 0);
}
CHECK_EQ(counter.GetCount(), 0);
bc1();
CHECK_EQ(counter.GetCount(), 0);
bc2();
CHECK_EQ(counter.GetCount(), 1);
}
TEST(IncrementalBarrierTest, RunAfterNumClosuresConcurrency) {
const int num_closure = 100, num_thread = 2;
std::atomic<int> schedule_count{0};
Counter counter;
{
IncrementalBarrier::DoneCallback done_callback =
absl::bind_front(&Counter::Increment, &counter);
IncrementalBarrier barrier(done_callback);
CHECK_EQ(counter.GetCount(), 0);
tensorflow::thread::ThreadPool pool(tensorflow::Env::Default(),
"BarrierClosure", num_thread);
for (int i = 0; i < num_closure; ++i) {
pool.Schedule([&barrier, &schedule_count]() {
schedule_count.fetch_add(1);
IncrementalBarrier::BarrierCallback bc = barrier.Inc();
Env::Default()->SleepForMicroseconds(100);
bc();
});
}
CHECK_EQ(counter.GetCount(), 0);
}
CHECK_EQ(schedule_count.load(std::memory_order_relaxed), 100);
CHECK_EQ(counter.GetCount(), 1);
}
#if defined(PLATFORM_GOOGLE)
void BM_FunctionInc(benchmark::State& state) {
IncrementalBarrier barrier([] {});
for (auto _ : state) {
barrier.Inc()();
}
}
BENCHMARK(BM_FunctionInc);
#endif
}
}
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/incremental_barrier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/incremental_barrier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
1314d878-27b7-445b-b083-af17ea4b6829 | cpp | tensorflow/tensorflow | executor | tensorflow/core/common_runtime/executor.cc | tensorflow/core/common_runtime/executor_test.cc
#include "tensorflow/core/common_runtime/executor.h"
#include <algorithm>
#include <atomic>
#include <memory>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/str_join.h"
#include "absl/time/time.h"
#include "absl/types/optional.h"
#include "tensorflow/core/activity_watcher/activity.h"
#include "tensorflow/core/common_runtime/costmodel_manager.h"
#include "tensorflow/core/common_runtime/entry.h"
#include "tensorflow/core/common_runtime/executor_factory.h"
#include "tensorflow/core/common_runtime/graph_view.h"
#include "tensorflow/core/common_runtime/immutable_executor_state.h"
#include "tensorflow/core/common_runtime/pending_counts.h"
#include "tensorflow/core/common_runtime/propagator_state.h"
#include "tensorflow/core/common_runtime/renamed_device.h"
#include "tensorflow/core/common_runtime/simple_propagator_state.h"
#include "tensorflow/core/common_runtime/step_stats_collector.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/control_flow.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/log_memory.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_segment.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/edgeset.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/gtl/manual_constructor.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/context.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/profile_utils/cpu_utils.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/annotated_traceme.h"
#include "tensorflow/core/profiler/lib/connected_traceme.h"
#include "tensorflow/core/profiler/lib/context_types.h"
#include "tensorflow/core/profiler/lib/scoped_annotation.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/util/determinism.h"
#include "tensorflow/core/util/managed_stack_trace.h"
#include "tensorflow/core/util/tensor_slice_reader_cache.h"
#include "tsl/platform/tracing.h"
namespace tensorflow {
namespace {
static const Tensor* const kEmptyTensor = new Tensor;
namespace nodestats {
inline int64_t NowInNsec() { return EnvTime::NowNanos(); }
void SetScheduled(NodeExecStatsInterface* stats, int64_t micros) {
if (!stats) return;
stats->SetScheduled(micros * EnvTime::kMicrosToNanos);
}
void SetAllStart(NodeExecStatsInterface* stats) {
if (!stats) return;
stats->RecordExecutorStarted();
}
void SetOpStart(NodeExecStatsInterface* stats) {
if (!stats) return;
stats->RecordComputeStarted();
}
void SetOpEnd(NodeExecStatsInterface* stats) {
if (!stats) return;
stats->RecordComputeEnded();
}
void SetAllEnd(NodeExecStatsInterface* stats) {
if (!stats) return;
stats->RecordExecutorEnded();
}
void SetOutput(NodeExecStatsInterface* stats, int slot, const Tensor* v) {
if (!stats) return;
stats->SetOutput(slot, v);
}
void SetMemory(NodeExecStatsInterface* stats, OpKernelContext* ctx) {
if (!stats) return;
stats->SetMemory(ctx);
}
}
struct KernelTimer {
uint64 start_cycles = profile_utils::CpuUtils::GetCurrentClockCycle();
uint64 ElapsedCycles() {
return profile_utils::CpuUtils::GetCurrentClockCycle() - start_cycles;
}
};
typedef absl::InlinedVector<TensorValue, 4UL> TensorValueVec;
typedef absl::InlinedVector<AllocatorAttributes, 4UL> AllocatorAttributeVec;
class ExecutorImpl : public Executor {
public:
explicit ExecutorImpl(const LocalExecutorParams& p) : immutable_state_(p) {}
Status Initialize(const Graph& graph) {
TF_RETURN_IF_ERROR(immutable_state_.Initialize(graph));
kernel_stats_.Initialize(immutable_state_.graph_view());
return absl::OkStatus();
}
private:
void RunAsyncInternal(const Args& args, DoneCallback done) override;
template <class PropagatorStateType>
friend class ExecutorState;
class KernelStats {
public:
KernelStats() = default;
void Initialize(const GraphView& gview) {
is_expensive_.resize(gview.num_nodes());
cost_estimates_ =
std::make_unique<std::atomic_uint_fast64_t[]>(gview.num_nodes());
for (int32_t i = 0; i < gview.num_nodes(); ++i) {
if (gview.node(i)) {
is_expensive_[i] =
gview.node(i)->kernel && gview.node(i)->kernel->IsExpensive();
cost_estimates_[i] = kInitialCostEstimateCycles;
}
}
}
bool IsExpensive(const NodeItem& node) const {
return is_expensive_[node.node_id] &&
(cost_estimates_[node.node_id].load(std::memory_order_relaxed) >
kOpIsExpensiveThresholdCycles);
}
bool HasExpensiveMarker(const NodeItem& node) const {
return is_expensive_[node.node_id];
}
void UpdateCostEstimate(const NodeItem& node, uint64 elapsed_cycles) {
std::atomic_uint_fast64_t& cost_estimate = cost_estimates_[node.node_id];
auto prev_estimate = cost_estimate.load(std::memory_order_relaxed);
uint64 new_estimate =
((kCostDecay - 1) * prev_estimate + elapsed_cycles) / kCostDecay;
cost_estimate.store(new_estimate, std::memory_order_relaxed);
}
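    // UpdateCostEstimate() above keeps an exponential moving average of the
    // kernel's cost in cycles: with kCostDecay == 10 the update is
    // new_estimate = (9 * prev_estimate + elapsed_cycles) / 10. A node is
    // then reported as expensive by IsExpensive() only if it was flagged
    // expensive at initialization and its current estimate exceeds
    // kOpIsExpensiveThresholdCycles.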
private:
static constexpr uint64 kInitialCostEstimateCycles = 100 * 1000 * 1000;
static constexpr uint64 kOpIsExpensiveThresholdCycles = 8000;
static constexpr uint64 kCostDecay = 10;
std::vector<bool> is_expensive_;
std::unique_ptr<std::atomic_uint_fast64_t[]> cost_estimates_;
};
ImmutableExecutorState immutable_state_;
KernelStats kernel_stats_;
ExecutorImpl(const ExecutorImpl&) = delete;
void operator=(const ExecutorImpl&) = delete;
};
template <class PropagatorStateType>
class ExecutorState {
public:
ExecutorState(const Executor::Args& args,
const ImmutableExecutorState& immutable_state_,
ExecutorImpl::KernelStats* kernel_stats_);
~ExecutorState();
void RunAsync(Executor::DoneCallback done);
private:
typedef typename PropagatorStateType::TaggedNode TaggedNode;
typedef
typename PropagatorStateType::TaggedNodeReadyQueue TaggedNodeReadyQueue;
typedef typename PropagatorStateType::TaggedNodeSeq TaggedNodeSeq;
struct AsyncState;
void Process(const TaggedNode& node, int64_t scheduled_nsec);
void ProcessInline(TaggedNodeReadyQueue* inline_ready,
int64_t scheduled_nsec);
Status ProcessSync(const NodeItem& item, OpKernelContext::Params* params,
EntryVector* outputs, NodeExecStatsInterface* stats);
void ProcessAsync(const NodeItem& item, const OpKernelContext::Params& params,
const TaggedNode& tagged_node, Entry* first_input,
NodeExecStatsInterface* stats,
activity_watcher::ActivityId activity_id);
void ProcessNoop(NodeExecStatsInterface* stats);
void ProcessConstTensor(const NodeItem& item, EntryVector* outputs,
NodeExecStatsInterface* stats);
Status PrepareInputs(const NodeItem& item, Entry* first_input,
TensorValueVec* inputs,
AllocatorAttributeVec* input_alloc_attrs,
bool* is_input_dead);
Status ProcessOutputs(const NodeItem& item, OpKernelContext* ctx,
Entry* outputs, NodeExecStatsInterface* stats);
bool NodeDone(const Status& s, TaggedNodeSeq* ready,
NodeExecStatsInterface* stats,
TaggedNodeReadyQueue* inline_ready);
void ScheduleReady(TaggedNodeSeq* ready, TaggedNodeReadyQueue* inline_ready);
template <typename Closure>
void RunTask(Closure&& c, int sample_rate = 0);
void Finish();
void ScheduleFinish();
DeviceContext* device_context_ = nullptr;
const bool vlog_;
const bool log_memory_;
int64_t step_id_;
int64_t trace_id_;
int64_t start_time_usecs_ = 0;
absl::optional<absl::Time> deadline_;
static constexpr uint64 kInlineScheduleReadyThreshold = 500;
RendezvousInterface* rendezvous_;
CollectiveExecutor* collective_executor_ = nullptr;
const ConfigProto* const session_config_;
SessionState* session_state_;
string session_handle_;
const SessionMetadata* session_metadata_ = nullptr;
TensorStore* tensor_store_;
ScopedStepContainer* step_container_;
StepStatsCollectorInterface* const stats_collector_;
const tsl::tracing::EventCollector* const event_collector_;
Context context_;
checkpoint::TensorSliceReaderCacheWrapper* slice_reader_cache_;
CallFrameInterface* call_frame_;
const ImmutableExecutorState& immutable_state_;
ExecutorImpl::KernelStats* const kernel_stats_;
CancellationManager* cancellation_manager_;
tsl::CoordinationServiceAgent* coordination_service_agent_;
absl::optional<ManagedStackTrace> stack_trace_ = absl::nullopt;
std::unique_ptr<DeviceBase> user_device_;
Executor::Args::Runner runner_;
bool sync_on_finish_;
const bool run_all_kernels_inline_;
PropagatorStateType propagator_;
Executor::DoneCallback done_cb_;
std::atomic_int_fast32_t num_outstanding_ops_;
mutex num_deferred_ops_mu_;
int64_t num_deferred_ops_ TF_GUARDED_BY(num_deferred_ops_mu_) = 0;
bool finish_when_deferred_ops_done_ TF_GUARDED_BY(num_deferred_ops_mu_) =
false;
mutex mu_;
Status status_ TF_GUARDED_BY(mu_);
};
template <class PropagatorStateType>
ExecutorState<PropagatorStateType>::ExecutorState(
const Executor::Args& args, const ImmutableExecutorState& immutable_state,
ExecutorImpl::KernelStats* kernel_stats)
: vlog_(VLOG_IS_ON(1)),
log_memory_(LogMemory::IsEnabled()),
step_id_(args.step_id),
trace_id_(args.function_trace_id ? *args.function_trace_id : step_id_),
start_time_usecs_(args.start_time_usecs),
deadline_(args.deadline),
rendezvous_(args.rendezvous),
collective_executor_(args.collective_executor),
session_config_(args.session_config),
session_state_(args.session_state),
session_handle_(args.session_handle),
session_metadata_(immutable_state.params().session_metadata),
tensor_store_(args.tensor_store),
step_container_(args.step_container),
stats_collector_(args.stats_collector),
event_collector_(tsl::tracing::GetEventCollector(
tsl::tracing::EventCategory::kCompute)),
context_(ContextKind::kThread),
slice_reader_cache_(new checkpoint::TensorSliceReaderCacheWrapper),
call_frame_(args.call_frame),
immutable_state_(immutable_state),
kernel_stats_(kernel_stats),
cancellation_manager_(args.cancellation_manager),
coordination_service_agent_(args.coordination_service_agent),
stack_trace_(args.stack_trace),
runner_(args.runner),
sync_on_finish_(args.sync_on_finish),
run_all_kernels_inline_(args.run_all_kernels_inline),
propagator_(immutable_state, step_id_, vlog_),
num_outstanding_ops_(0) {
if (args.user_intra_op_threadpool != nullptr) {
Device* device = immutable_state_.params().device;
user_device_ = RenamedDevice::NewRenamedDevice(
device->name(), device, false, false, args.user_intra_op_threadpool);
}
}
template <class PropagatorStateType>
ExecutorState<PropagatorStateType>::~ExecutorState() {
if (device_context_) {
device_context_->Unref();
}
delete slice_reader_cache_;
}
template <class PropagatorStateType>
template <typename Closure>
void ExecutorState<PropagatorStateType>::RunTask(Closure&& c, int sample_rate) {
alignas(64) static std::atomic<int64_t> num_enqueue_ops{0};
alignas(64) static std::atomic<int64_t> num_dequeue_ops{0};
auto n_enqueues = num_enqueue_ops.fetch_add(1, std::memory_order_relaxed);
if (n_enqueues % std::max(16, sample_rate) == 0) {
auto n_dequeues = num_dequeue_ops.load(std::memory_order_relaxed);
metrics::UpdateGraphPendingQueueLength(n_enqueues - n_dequeues);
}
runner_([c = std::forward<Closure>(c)]() mutable {
num_dequeue_ops.fetch_add(1, std::memory_order_relaxed);
std::forward<Closure>(c)();
});
}
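// RunTask() above wraps every closure handed to the runner so that enqueue
// and dequeue counts can be compared; roughly one in max(16, sample_rate)
// enqueues samples the difference and reports it as the graph's
// pending-queue-length metric.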
template <class PropagatorStateType>
void ExecutorState<PropagatorStateType>::RunAsync(Executor::DoneCallback done) {
TaggedNodeSeq ready;
Device* device = immutable_state_.params().device;
const Status get_context_status =
device->TryGetDeviceContext(&device_context_);
if (!get_context_status.ok()) {
delete this;
done(get_context_status);
return;
}
ready.reserve(immutable_state_.root_nodes().size());
propagator_.ActivateRoots(immutable_state_.root_nodes(), &ready);
num_outstanding_ops_ = ready.size();
if (ready.empty()) {
delete this;
done(absl::OkStatus());
} else {
done_cb_ = std::move(done);
ScheduleReady(&ready, nullptr);
}
}
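// RunAsync() above activates the graph's root nodes and either completes
// immediately (deleting the executor state) when the device context cannot
// be obtained or there is nothing ready to run, or stashes the done callback
// and schedules the ready roots.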
template <class PropagatorStateType>
struct ExecutorState<PropagatorStateType>::AsyncState {
AsyncState(const OpKernelContext::Params& p, const TaggedNode& _tagged_node,
const NodeItem* _item, Entry* _first_input,
NodeExecStatsInterface* _stats)
: saved_inputs(p.inputs.begin(), p.inputs.end()),
saved_input_alloc_attrs(p.input_alloc_attrs.begin(),
p.input_alloc_attrs.end()),
params(p),
tagged_node(_tagged_node),
item(_item),
first_input(_first_input),
ctx(ParamsButClearingEigenGPUDevice(¶ms), item->num_outputs),
stats(_stats) {
params.inputs = saved_inputs;
params.input_alloc_attrs = saved_input_alloc_attrs;
}
TensorValueVec saved_inputs;
AllocatorAttributeVec saved_input_alloc_attrs;
OpKernelContext::Params params;
TaggedNode tagged_node;
const NodeItem* item;
Entry* first_input;
OpKernelContext ctx;
NodeExecStatsInterface* stats;
private:
OpKernelContext::Params* ParamsButClearingEigenGPUDevice(
OpKernelContext::Params* p) {
p->eigen_gpu_device = nullptr;
return p;
}
};
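// AsyncState above copies the inputs and allocator attributes out of
// OpKernelContext::Params so they remain valid while an asynchronous kernel
// is still running after the calling stack frame has gone away; the Eigen
// GPU device pointer is cleared before the OpKernelContext is constructed.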
bool MightTrace(const tsl::tracing::EventCollector* event_collector,
bool is_expensive) {
if (event_collector != nullptr) {
return true;
}
if (tsl::profiler::ScopedAnnotation::IsEnabled()) return true;
return tsl::profiler::TraceMe::Active(
tsl::profiler::GetTFTraceMeLevel(is_expensive));
}
template <class PropagatorStateType>
Status ExecutorState<PropagatorStateType>::ProcessSync(
const NodeItem& item, OpKernelContext::Params* params, EntryVector* outputs,
NodeExecStatsInterface* stats) {
Status s;
OpKernelContext ctx(params, item.num_outputs);
nodestats::SetOpStart(stats);
OpKernel* op_kernel = item.kernel;
Device* device = immutable_state_.params().device;
const bool is_expensive = kernel_stats_->IsExpensive(item);
if (TF_PREDICT_FALSE(MightTrace(event_collector_, is_expensive))) {
tsl::tracing::ScopedRegion region(tsl::tracing::EventCategory::kCompute,
op_kernel->name_view());
profiler::AnnotatedTraceMe activity(
[op_kernel, &ctx] {
return op_kernel->TraceString(
ctx, tsl::profiler::TfOpDetailsEnabled());
},
tsl::profiler::GetTFTraceMeLevel(is_expensive));
device->Compute(op_kernel, &ctx);
} else if (kernel_stats_->HasExpensiveMarker(item)) {
KernelTimer timer;
device->Compute(op_kernel, &ctx);
constexpr int kKernelExecutionTrackingInvocationSkipCount = 16;
if (is_expensive ||
timer.start_cycles % kKernelExecutionTrackingInvocationSkipCount == 0) {
kernel_stats_->UpdateCostEstimate(item, timer.ElapsedCycles());
}
} else {
device->Compute(op_kernel, &ctx);
}
nodestats::SetOpEnd(stats);
if (outputs->size() < item.num_outputs) outputs->resize(item.num_outputs);
s = ProcessOutputs(item, &ctx, outputs->data(), stats);
nodestats::SetMemory(stats, &ctx);
return s;
}
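// ProcessSync() above chooses between three compute paths: a traced path
// when any profiler or event collector might be active, a timed path that
// feeds sampled timings to KernelStats::UpdateCostEstimate for kernels
// carrying the expensive marker, and a plain Compute() call otherwise; in
// every case the outputs are collected through ProcessOutputs().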
template <class PropagatorStateType>
void ExecutorState<PropagatorStateType>::ProcessAsync(
const NodeItem& item, const OpKernelContext::Params& params,
const TaggedNode& tagged_node, Entry* first_input,
NodeExecStatsInterface* stats, activity_watcher::ActivityId activity_id) {
AsyncOpKernel* async_kernel = item.kernel->AsAsync();
DCHECK(async_kernel != nullptr);
AsyncState* state =
new AsyncState(params, tagged_node, &item, first_input, stats);
nodestats::SetOpStart(stats);
{
profiler::AnnotatedTraceMe activity(
[async_kernel, state] {
return async_kernel->TraceString(
state->ctx, tsl::profiler::TfOpDetailsEnabled());
},
tsl::profiler::GetTFTraceMeLevel(false));
tsl::profiler::TraceMeProducer producer(
[&] {
return tsl::profiler::TraceMeEncode(
"ExecutorState::ProcessAsync::Start",
{{"name", async_kernel->name()},
{"kernel_type", async_kernel->type_string()},
{"step_id", step_id_}});
},
tsl::profiler::ContextType::kTfExecutor);
auto done = [this, state, activity_id, ctx_id = producer.GetContextId()]() {
tsl::profiler::TraceMeConsumer consumer(
[&] {
return profiler::TraceMeEncode(
"ExecutorState::ProcessAsync::Done",
{{"name", state->item->kernel->name()},
{"kernel_type", state->item->kernel->type_string()},
{"step_id", step_id_}});
},
tsl::profiler::ContextType::kTfExecutor, ctx_id);
Device* device = immutable_state_.params().device;
NodeExecStatsInterface* stats = state->stats;
Entry* first_input = state->first_input;
nodestats::SetOpEnd(stats);
EntryVector outputs(state->item->num_outputs);
Status s =
ProcessOutputs(*state->item, &state->ctx, outputs.data(), stats);
nodestats::SetMemory(stats, &state->ctx);
if (vlog_) {
VLOG(2) << "Async kernel done: " << state->item->node_id << " step "
<< step_id_ << " "
<< SummarizeNodeDef(state->item->kernel->def())
<< (state->tagged_node.get_is_dead() ? " is dead" : "")
<< " device: " << device->name();
}
const int num_inputs = state->item->num_inputs;
for (int i = 0; i < num_inputs; ++i) {
(first_input + i)->ClearVal();
}
propagator_.MaybeMarkCompleted(state->tagged_node);
activity_watcher::ActivityEnd(activity_id);
TaggedNodeSeq ready;
if (s.ok()) {
propagator_.PropagateOutputs(state->tagged_node, &outputs, &ready);
}
outputs.clear();
const bool completed = NodeDone(s, &ready, stats, nullptr);
delete state;
if (completed) ScheduleFinish();
};
immutable_state_.params().device->ComputeAsync(async_kernel, &state->ctx,
std::move(done));
}
}
template <class PropagatorStateType>
void ExecutorState<PropagatorStateType>::ProcessNoop(
NodeExecStatsInterface* stats) {
nodestats::SetOpStart(stats);
nodestats::SetOpEnd(stats);
}
template <class PropagatorStateType>
void ExecutorState<PropagatorStateType>::ProcessConstTensor(
const NodeItem& item, EntryVector* outputs, NodeExecStatsInterface* stats) {
nodestats::SetOpStart(stats);
nodestats::SetOpEnd(stats);
Entry& output = (*outputs)[0];
output.state = Entry::State::HAS_CONST_TENSOR;
output.const_tensor = item.const_tensor;
output.alloc_attr = item.output_attrs()[0];
}
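// ProcessConstTensor() above forwards a kernel's precomputed constant tensor
// directly into the output entry, so no kernel computation happens for such
// nodes.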
template <class PropagatorStateType>
void ExecutorState<PropagatorStateType>::Process(const TaggedNode& tagged_node,
int64_t scheduled_nsec) {
tsl::profiler::TraceMe traceme("ExecutorState::Process Scheduled",
tsl::profiler::TraceMeLevel::kVerbose);
TaggedNodeReadyQueue inline_ready;
inline_ready.push_back(tagged_node);
return ProcessInline(&inline_ready, scheduled_nsec);
}
template <class PropagatorStateType>
void ExecutorState<PropagatorStateType>::ProcessInline(
TaggedNodeReadyQueue* inline_ready, int64_t scheduled_nsec) {
WithContext wc(context_);
auto ready = std::make_unique<TaggedNodeSeq>();
auto inputs = std::make_unique<TensorValueVec>();
AllocatorAttributeVec input_alloc_attrs;
auto params = std::make_unique<OpKernelContext::Params>();
params->step_id = step_id_;
Device* device = immutable_state_.params().device;
if (user_device_) {
params->device = user_device_.get();
} else {
params->device = device;
}
params->start_time_usecs = start_time_usecs_;
params->deadline = deadline_;
params->log_memory = log_memory_;
params->rendezvous = rendezvous_;
params->collective_executor = collective_executor_;
params->session_config = session_config_;
params->session_state = session_state_;
params->session_handle = session_handle_;
params->session_metadata = session_metadata_;
params->tensor_store = tensor_store_;
params->cancellation_manager = cancellation_manager_;
params->coordination_service_agent = coordination_service_agent_;
params->stack_trace = stack_trace_;
params->call_frame = call_frame_;
params->function_library = immutable_state_.params().function_library;
params->resource_manager = device->resource_manager();
params->step_container = step_container_;
params->slice_reader_cache = slice_reader_cache_;
params->runner = &runner_;
params->run_all_kernels_inline = run_all_kernels_inline_;
params->stats_collector = stats_collector_;
params->inc_num_deferred_ops_function = [this]() {
mutex_lock lock(num_deferred_ops_mu_);
num_deferred_ops_++;
};
params->dec_num_deferred_ops_function = [this]() {
bool finish_when_deferred_ops_done = false;
{
mutex_lock lock(num_deferred_ops_mu_);
num_deferred_ops_--;
if (num_deferred_ops_ == 0) {
finish_when_deferred_ops_done = finish_when_deferred_ops_done_;
}
}
if (finish_when_deferred_ops_done) Finish();
};
params->op_device_context = device_context_;
Status s;
NodeExecStatsInterface* stats = nullptr;
EntryVector outputs(1);
bool completed = false;
int64_t last_iter_num = -1;
std::unique_ptr<tsl::profiler::TraceMeConsumer> iteration_scope;
while (!inline_ready->empty()) {
TaggedNode tagged_node = inline_ready->front();
int64_t current_iter_num = tagged_node.get_iter_num();
if (current_iter_num != last_iter_num) {
iteration_scope = std::make_unique<tsl::profiler::TraceMeConsumer>(
[&] {
return profiler::TraceMeEncode(
"ExecutorState::Process",
{{"id", step_id_}, {"iter_num", tagged_node.get_iter_num()}});
},
tsl::profiler::ContextType::kTfExecutor, trace_id_,
tsl::profiler::TraceMeLevel::kInfo);
last_iter_num = current_iter_num;
}
inline_ready->pop_front();
const NodeItem& item = tagged_node.get_node_item();
const int id = item.node_id;
propagator_.MaybeMarkStarted(tagged_node);
const activity_watcher::ActivityId activity_id =
activity_watcher::ActivityStart(
[&]() {
return std::make_unique<activity_watcher::Activity>(
"ExecutorState::Process",
activity_watcher::ActivityCategory::kMisc,
activity_watcher::Activity::Attributes{
{"node_name", item.kernel->def().name()},
{"op", item.kernel->def().op()},
{"iter_num", absl::StrCat(tagged_node.get_iter_num())},
{"step_id", absl::StrCat(params->step_id)},
{"node_id", absl::StrCat(id)},
{"device", device->name()},
{"inputs",
absl::StrJoin(item.kernel->def().input(), "; ")},
{"original_node_names",
absl::StrJoin(item.kernel->def()
.experimental_debug_info()
.original_node_names(),
"; ")},
{"original_func_names",
absl::StrJoin(item.kernel->def()
.experimental_debug_info()
.original_func_names(),
"; ")},
});
},
2);
params->track_allocations = false;
stats = nullptr;
if (stats_collector_ && !tagged_node.get_is_dead()) {
stats = stats_collector_->CreateNodeExecStats(&item.kernel->def());
params->track_allocations = stats ? stats->TrackAllocations() : false;
nodestats::SetScheduled(stats, scheduled_nsec);
nodestats::SetAllStart(stats);
}
if (vlog_) {
VLOG(1) << "Process node: " << id << " step " << params->step_id << " "
<< SummarizeNodeDef(item.kernel->def())
<< (tagged_node.get_is_dead() ? " is dead" : "")
<< " device: " << device->name();
}
Entry* first_input = propagator_.GetInputTensors(tagged_node);
bool launched_asynchronously = false;
if (tagged_node.get_is_dead() && !item.is_transfer_node) {
if (outputs.size() < item.num_outputs) outputs.resize(item.num_outputs);
} else if (TF_PREDICT_FALSE(item.is_noop)) {
ProcessNoop(stats);
} else if (item.const_tensor != nullptr && !params->track_allocations) {
ProcessConstTensor(item, &outputs, stats);
} else {
bool is_input_dead = false;
s = PrepareInputs(item, first_input, inputs.get(), &input_alloc_attrs,
&is_input_dead);
if (!s.ok()) {
const int num_inputs = item.num_inputs;
for (int i = 0; i < num_inputs; ++i) {
(first_input + i)->ClearVal();
}
propagator_.MaybeMarkCompleted(tagged_node);
activity_watcher::ActivityEnd(activity_id);
completed = NodeDone(s, ready.get(), stats, inline_ready);
continue;
}
params->op_kernel = item.kernel;
params->frame_iter = propagator_.GetFrameAndIter(tagged_node);
params->is_input_dead = is_input_dead;
params->output_attr_array = item.output_attrs();
params->forward_from_array = item.forward_from();
params->outputs_required_array = item.outputs_required.get();
params->inputs = *inputs;
params->input_alloc_attrs = input_alloc_attrs;
if (item.kernel_is_async) {
ProcessAsync(item, *params, tagged_node, first_input, stats,
activity_id);
launched_asynchronously = true;
} else {
s = ProcessSync(item, params.get(), &outputs, stats);
}
}
if (!launched_asynchronously) {
if (vlog_) {
VLOG(2) << "Synchronous kernel done: " << id << " step "
<< params->step_id << " "
<< SummarizeNodeDef(item.kernel->def())
<< (tagged_node.get_is_dead() ? " is dead: " : "")
<< " device: " << device->name();
}
const int num_inputs = item.num_inputs;
for (int i = 0; i < num_inputs; ++i) {
(first_input + i)->ClearVal();
}
propagator_.MaybeMarkCompleted(tagged_node);
activity_watcher::ActivityEnd(activity_id);
if (s.ok()) {
propagator_.PropagateOutputs(tagged_node, &outputs, ready.get());
}
const int num_outputs = item.num_outputs;
for (int i = 0; i < num_outputs; ++i) {
outputs[i].ClearVal();
}
if (stats) {
scheduled_nsec = nodestats::NowInNsec();
}
completed = NodeDone(s, ready.get(), stats, inline_ready);
}
}
if (completed) ScheduleFinish();
}
template <class PropagatorStateType>
Status ExecutorState<PropagatorStateType>::PrepareInputs(
const NodeItem& item, Entry* first_input, TensorValueVec* inputs,
AllocatorAttributeVec* input_alloc_attrs, bool* is_input_dead) {
inputs->resize(item.num_inputs);
input_alloc_attrs->resize(item.num_inputs);
*is_input_dead = false;
for (int i = 0; i < item.num_inputs; ++i) {
const bool expect_ref = TF_PREDICT_FALSE(item.is_any_input_ref_typed) &&
IsRefType(item.input_type(i));
Entry* entry = first_input + i;
(*input_alloc_attrs)[i] = entry->alloc_attr;
TensorValue* inp = &(*inputs)[i];
switch (entry->state) {
case Entry::State::NO_VALUE: {
inp->mutex_if_ref = nullptr;
if (item.is_merge) {
inp->tensor = nullptr;
} else {
DCHECK(item.is_transfer_node)
<< item.kernel->name() << " - input " << i;
entry->state = Entry::State::HAS_CONST_TENSOR;
entry->const_tensor = kEmptyTensor;
inp->tensor = const_cast<Tensor*>(kEmptyTensor);
*is_input_dead = true;
}
break;
}
case Entry::State::HAS_VALUE: {
if (TF_PREDICT_FALSE(expect_ref)) {
return AttachDef(
errors::InvalidArgument(i, "-th input expects a ref type"),
item.kernel->def());
}
inp->mutex_if_ref = nullptr;
inp->tensor = entry->val.get();
break;
}
case Entry::State::HAS_CONST_TENSOR: {
if (TF_PREDICT_FALSE(expect_ref)) {
return AttachDef(
errors::InvalidArgument(i, "-th input expects a ref type"),
item.kernel->def());
}
inp->mutex_if_ref = nullptr;
inp->tensor = const_cast<Tensor*>(entry->const_tensor);
break;
}
case Entry::State::HAS_REF_TENSOR: {
{
tf_shared_lock ml(*entry->ref_tensor.mu);
if (TF_PREDICT_FALSE(!entry->ref_tensor.tensor->IsInitialized() &&
!item.is_initialization_op)) {
return AttachDef(errors::FailedPrecondition(
"Attempting to use uninitialized value ",
item.kernel->requested_input(i)),
item.kernel->def());
}
}
if (expect_ref) {
inp->mutex_if_ref = entry->ref_tensor.mu;
inp->tensor = entry->ref_tensor.tensor;
} else {
{
mutex* ref_mu = entry->ref_tensor.mu;
Tensor* ref_tensor = entry->ref_tensor.tensor;
tf_shared_lock l(*ref_mu);
entry->val.Init(*ref_tensor);
}
entry->state = Entry::State::HAS_VALUE;
inp->mutex_if_ref = nullptr;
inp->tensor = entry->val.get();
if (TF_PREDICT_FALSE(item.input_type(i) != inp->tensor->dtype())) {
return AttachDef(
errors::InvalidArgument(
i, "-th input expects type ",
DataTypeString(item.input_type(i)),
" but automatically dereferenced input tensor has type ",
DataTypeString(inp->tensor->dtype())),
item.kernel->def());
}
}
break;
}
}
}
return absl::OkStatus();
}
template <class PropagatorStateType>
Status ExecutorState<PropagatorStateType>::ProcessOutputs(
const NodeItem& item, OpKernelContext* ctx, Entry* outputs,
NodeExecStatsInterface* stats) {
Status s = ctx->status();
if (!s.ok()) {
s = AttachDef(s, item.kernel->def());
if (vlog_ && VLOG_IS_ON(1)) {
LOG(WARNING) << this << " Compute status: " << s;
}
if (s.code() == error::RESOURCE_EXHAUSTED) {
if (stats_collector_) {
string err =
stats_collector_->ReportAllocsOnResourceExhausted(s.message());
s = errors::CreateWithUpdatedMessage(s,
strings::StrCat(s.message(), err));
} else {
s = errors::CreateWithUpdatedMessage(
s,
strings::StrCat(
s.message(),
"\nHint: If you want to see a list of allocated tensors when "
"OOM happens, add report_tensor_allocations_upon_oom "
"to RunOptions for current allocation info. This isn't "
"available when running in Eager mode.\n"));
}
} else if (s.code() == error::UNAVAILABLE &&
!item.is_distributed_communication) {
s = errors::ReplaceErrorFromNonCommunicationOps(s, item.kernel->name());
}
return ADD_SOURCE_LOCATION(s);
}
for (int i = 0; i < item.num_outputs; ++i) {
const TensorValue val = ctx->release_output(i);
Entry* out = &outputs[i];
DCHECK(out->state == Entry::State::NO_VALUE);
if (val.tensor == nullptr) {
if (!(item.is_recv_or_switch ||
(item.outputs_required && !item.outputs_required[i]))) {
s.Update(errors::Internal("Missing ", i, "-th output from ",
FormatNodeDefForError(item.kernel->def())));
}
} else {
out->alloc_attr = ctx->output_alloc_attr(i);
DataType dtype = val.dtype_safe();
if (dtype == item.output_type(i)) {
if (stats && val.tensor->IsInitialized()) {
nodestats::SetOutput(stats, i, val.tensor);
}
if (val.is_ref()) {
out->state = Entry::State::HAS_REF_TENSOR;
out->ref_tensor.tensor = val.tensor;
out->ref_tensor.mu = val.mutex_if_ref;
if (log_memory_) {
Tensor to_log;
{
tf_shared_lock l(*out->ref_tensor.mu);
to_log = *out->ref_tensor.tensor;
}
LogMemory::RecordTensorOutput(ctx->op_kernel().name(),
ctx->step_id(), i, to_log);
}
} else {
out->state = Entry::State::HAS_VALUE;
out->val.Init(std::move(*val.tensor));
if (log_memory_) {
LogMemory::RecordTensorOutput(ctx->op_kernel().name(),
ctx->step_id(), i, *out->val);
}
}
} else {
s.Update(
errors::Internal("Output ", i, " of type ", DataTypeString(dtype),
" does not match declared output type ",
DataTypeString(item.output_type(i)), " for node ",
FormatNodeDefForError(item.kernel->def())));
}
}
if (!val.is_ref()) {
delete val.tensor;
}
}
return s;
}
template <class PropagatorStateType>
bool ExecutorState<PropagatorStateType>::NodeDone(
const Status& s, TaggedNodeSeq* ready, NodeExecStatsInterface* stats,
TaggedNodeReadyQueue* inline_ready) {
if (stats) {
nodestats::SetAllEnd(stats);
DCHECK_NE(stats_collector_, nullptr);
stats->Done(immutable_state_.params().device->name());
}
if (TF_PREDICT_TRUE(s.ok())) {
const size_t ready_size = ready->size();
if (ready_size == 0) {
return num_outstanding_ops_.fetch_sub(1) == 1;
} else {
if (ready_size > 1) {
num_outstanding_ops_.fetch_add(ready_size - 1,
std::memory_order_relaxed);
}
ScheduleReady(ready, inline_ready);
return false;
}
} else {
bool abort_run = false;
Status maybe_derived_s(s);
{
mutex_lock l(mu_);
if (status_.ok()) {
abort_run = true;
if (cancellation_manager_ && cancellation_manager_->IsCancelled() &&
(errors::IsCancelled(s) || errors::IsAborted(s))) {
status_ = StatusGroup::MakeDerived(s);
maybe_derived_s = status_;
} else {
status_ = s;
}
}
}
if (abort_run) {
TRACEPRINTF("StartAbort: %s", s.ToString());
if (cancellation_manager_) {
VLOG(1) << "[" << immutable_state_.params().device->name()
<< "] Executor start aborting: " << s;
}
if (rendezvous_) {
rendezvous_->StartAbort(s);
}
if (cancellation_manager_) {
cancellation_manager_->StartCancelWithStatus(maybe_derived_s);
} else if (collective_executor_) {
collective_executor_->StartAbort(s);
}
}
return num_outstanding_ops_.fetch_sub(1) == 1;
}
}
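// Dispatches the nodes in `ready`: cheap or dead nodes are kept for the
// current thread via `inline_ready` when one is provided, while expensive
// nodes are handed to the thread pool, in chunks once their count exceeds
// kInlineScheduleReadyThreshold.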
template <class PropagatorStateType>
void ExecutorState<PropagatorStateType>::ScheduleReady(
TaggedNodeSeq* ready, TaggedNodeReadyQueue* inline_ready) {
tsl::profiler::TraceMe activity(
[&]() {
return strings::StrCat(
"ExecutorState::ScheduleReady#",
"ready_size=", (ready == nullptr ? -1 : ready->size()),
",inline_ready_size=",
(inline_ready == nullptr ? -1 : inline_ready->size()), "#");
},
tsl::profiler::GetTFTraceMeLevel(false));
DCHECK(!ready->empty());
int64_t scheduled_nsec = 0;
if (stats_collector_) {
scheduled_nsec = nodestats::NowInNsec();
}
if (run_all_kernels_inline_) {
if (inline_ready == nullptr) {
RunTask([this, ready = std::move(*ready), scheduled_nsec]() {
for (auto& tagged_node : ready) {
Process(tagged_node, scheduled_nsec);
}
});
} else {
for (auto& tagged_node : *ready) {
inline_ready->push_back(tagged_node);
}
}
} else {
const TaggedNode* curr_expensive_node = nullptr;
TaggedNodeSeq expensive_nodes;
if (inline_ready == nullptr) {
for (auto& tagged_node : *ready) {
RunTask([=]() { Process(tagged_node, scheduled_nsec); },
ready->size());
}
} else {
for (auto& tagged_node : *ready) {
const NodeItem& item = *tagged_node.node_item;
if (tagged_node.get_is_dead() || !kernel_stats_->IsExpensive(item)) {
inline_ready->push_back(tagged_node);
} else {
if (curr_expensive_node) {
expensive_nodes.push_back(*curr_expensive_node);
}
curr_expensive_node = &tagged_node;
}
}
}
if (curr_expensive_node) {
if (inline_ready->empty()) {
inline_ready->push_back(*curr_expensive_node);
} else {
expensive_nodes.push_back(*curr_expensive_node);
}
}
if (!expensive_nodes.empty()) {
if (expensive_nodes.size() < kInlineScheduleReadyThreshold) {
for (auto& tagged_node : expensive_nodes) {
RunTask(std::bind(&ExecutorState::Process, this, tagged_node,
scheduled_nsec),
expensive_nodes.size());
}
} else {
auto it = expensive_nodes.begin();
while (it < expensive_nodes.end()) {
auto end = it;
std::advance(end, kInlineScheduleReadyThreshold);
if (end > expensive_nodes.end()) {
end = expensive_nodes.end();
}
TaggedNodeSeq ready_chunk{it, end};
RunTask(
[this, ready_chunk = std::move(ready_chunk), scheduled_nsec]() {
tsl::profiler::TraceMe activity(
[&]() {
return strings::StrCat(
"ExecutorState::ScheduleReady::"
"ChildThreadExpensiveNodes#",
"ready_chunk_size=", ready_chunk.size(), "#");
},
tsl::profiler::GetTFTraceMeLevel(false));
for (auto& tagged_node : ready_chunk) {
RunTask(std::bind(&ExecutorState::Process, this, tagged_node,
scheduled_nsec),
ready_chunk.size());
}
});
it = end;
}
}
}
}
ready->clear();
}
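// Finishes the step immediately, or arranges for Finish() to run once all
// deferred ops have completed.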
template <class PropagatorStateType>
void ExecutorState<PropagatorStateType>::ScheduleFinish() {
{
mutex_lock lock(num_deferred_ops_mu_);
if (num_deferred_ops_ > 0) {
finish_when_deferred_ops_done_ = true;
return;
}
}
Finish();
}
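// Completes the step: snapshots the final status, optionally syncs the
// device, then invokes the done callback through the runner and deletes this
// ExecutorState.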
template <class PropagatorStateType>
void ExecutorState<PropagatorStateType>::Finish() {
mu_.lock();
auto status = status_;
auto done_cb = std::move(done_cb_);
auto runner = std::move(runner_);
mu_.unlock();
int64_t trace_id = trace_id_;
int64_t step_id = step_id_;
CHECK(done_cb != nullptr);
Device* device = immutable_state_.params().device;
if (vlog_ && !status.ok() && VLOG_IS_ON(1)) {
propagator_.DumpState();
}
if (!device->AllowsSyncOnCompletion()) {
status.Update(device->RefreshStatus());
if (!status.ok()) {
if (rendezvous_) {
rendezvous_->StartAbort(status);
}
if (cancellation_manager_) {
cancellation_manager_->StartCancelWithStatus(status);
} else if (collective_executor_) {
collective_executor_->StartAbort(status);
}
}
delete this;
runner([step_id, trace_id, status, done_cb = std::move(done_cb)]() {
tsl::profiler::TraceMeConsumer activity(
[&] {
return tsl::profiler::TraceMeEncode("ExecutorDoneCallback",
{{"id", step_id}});
},
tsl::profiler::ContextType::kTfExecutor, trace_id,
tsl::profiler::TraceMeLevel::kInfo);
done_cb(status);
});
return;
}
if (sync_on_finish_ && status.ok()) {
device->Sync([this, step_id, trace_id, runner = std::move(runner),
done_cb = std::move(done_cb)](const Status& status) mutable {
delete this;
runner([step_id, trace_id, status, done_cb = std::move(done_cb)]() {
tsl::profiler::TraceMeConsumer activity(
[&] {
return tsl::profiler::TraceMeEncode("ExecutorDoneCallback",
{{"id", step_id}});
},
tsl::profiler::ContextType::kTfExecutor, trace_id,
tsl::profiler::TraceMeLevel::kInfo);
done_cb(status);
});
});
} else {
delete this;
runner([step_id, trace_id, status, done_cb = std::move(done_cb)]() {
tsl::profiler::TraceMeConsumer activity(
[&] {
return tsl::profiler::TraceMeEncode("ExecutorDoneCallback",
{{"id", step_id}});
},
tsl::profiler::ContextType::kTfExecutor, trace_id,
tsl::profiler::TraceMeLevel::kInfo);
done_cb(status);
});
}
}
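// Chooses the propagator specialization: ordered propagation when op order
// determinism is required, full control-flow support when the graph needs it,
// and the simple propagator otherwise.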
void ExecutorImpl::RunAsyncInternal(const Args& args, DoneCallback done) {
if (OpOrderDeterminismRequired()) {
(new ExecutorState<OrderedPropagatorState>(args, immutable_state_,
&kernel_stats_))
->RunAsync(std::move(done));
} else if (immutable_state_.requires_control_flow_support()) {
(new ExecutorState<PropagatorState>(args, immutable_state_, &kernel_stats_))
->RunAsync(std::move(done));
} else {
(new ExecutorState<SimplePropagatorState>(args, immutable_state_,
&kernel_stats_))
->RunAsync(std::move(done));
}
}
}
Status NewLocalExecutor(const LocalExecutorParams& params, const Graph& graph,
Executor** executor) {
ExecutorImpl* impl = new ExecutorImpl(params);
const Status s = impl->Initialize(graph);
if (s.ok()) {
*executor = impl;
} else {
delete impl;
}
return s;
}
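// Creates an OpKernel directly on the device, using its allocator and
// resource manager, without going through any kernel cache.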
Status CreateNonCachedKernel(Device* device, FunctionLibraryRuntime* flib,
const std::shared_ptr<const NodeProperties>& props,
int graph_def_version, OpKernel** kernel) {
const auto device_type = DeviceType(device->attributes().device_type());
auto allocator = device->GetAllocator(AllocatorAttributes());
return CreateOpKernel(device_type, device, allocator, flib,
device->resource_manager(), props, graph_def_version,
kernel);
}
void DeleteNonCachedKernel(OpKernel* kernel) { delete kernel; }
namespace {
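// Registers the local executor as the default executor factory (under both
// the empty name and "DEFAULT").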
class DefaultExecutorRegistrar {
public:
DefaultExecutorRegistrar() {
Factory* factory = new Factory;
ExecutorFactory::Register("", factory);
ExecutorFactory::Register("DEFAULT", factory);
}
private:
class Factory : public ExecutorFactory {
Status NewExecutor(const LocalExecutorParams& params, const Graph& graph,
std::unique_ptr<Executor>* out_executor) override {
Executor* ret = nullptr;
TF_RETURN_IF_ERROR(NewLocalExecutor(params, std::move(graph), &ret));
out_executor->reset(ret);
return absl::OkStatus();
}
};
};
static DefaultExecutorRegistrar registrar;
}
} | #include "tensorflow/core/common_runtime/executor.h"
#include <algorithm>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/common_runtime/step_stats_collector.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/local_rendezvous.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/rendezvous.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
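// Test fixture that builds a local executor for a graph on a CPU device and
// runs it against a local rendezvous, scheduling work on the process-wide
// compute pool.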
class ExecutorTest : public ::testing::Test {
protected:
ExecutorTest()
: device_(DeviceFactory::NewDevice("CPU", {},
"/job:localhost/replica:0/task:0")),
step_stats_collector_(&step_stats_) {
SessionOptions options;
thread_pool_ = ComputePool(options);
}
~ExecutorTest() override {
while (!rendez_->RefCountIsOne()) {
LOG(INFO) << "Waiting for rendezvous to release. Current refcount: "
<< rendez_->RefCount();
absl::SleepFor(absl::Milliseconds(200));
LocalRendezvous::ReleaseAbortedRendezvous();
}
CHECK(rendez_->Unref());
delete exec_;
}
void Create(std::unique_ptr<const Graph> graph) {
const int version = graph->versions().producer();
LocalExecutorParams params;
params.device = device_.get();
params.create_kernel =
[this, version](const std::shared_ptr<const NodeProperties>& props,
OpKernel** kernel) {
return CreateNonCachedKernel(device_.get(), nullptr, props, version,
kernel);
};
params.delete_kernel = [](OpKernel* kernel) {
DeleteNonCachedKernel(kernel);
};
rendez_ = NewLocalRendezvous();
delete exec_;
TF_CHECK_OK(NewLocalExecutor(params, *graph, &exec_));
runner_ = [this](std::function<void()> fn) { thread_pool_->Schedule(fn); };
}
Status Run(Rendezvous* rendez) {
Executor::Args args;
args.rendezvous = rendez;
args.stats_collector = &step_stats_collector_;
args.runner = runner_;
return exec_->Run(args);
}
thread::ThreadPool* thread_pool_ = nullptr;
std::unique_ptr<Device> device_;
Executor* exec_ = nullptr;
StepStatsCollector step_stats_collector_;
StepStats step_stats_;
Executor::Args::Runner runner_;
Rendezvous* rendez_ = nullptr;
};
Tensor V(const float val) {
Tensor tensor(DT_FLOAT, TensorShape({}));
tensor.scalar<float>()() = val;
return tensor;
}
Tensor VI(const int32_t val) {
Tensor tensor(DT_INT32, TensorShape({}));
tensor.scalar<int32>()() = val;
return tensor;
}
Tensor VB(const bool val) {
Tensor tensor(DT_BOOL, TensorShape({}));
tensor.scalar<bool>()() = val;
return tensor;
}
Tensor VD(const double val) {
Tensor tensor(DT_DOUBLE, TensorShape({}));
tensor.scalar<double>()() = val;
return tensor;
}
float V(const Tensor& tensor) {
CHECK_EQ(tensor.dtype(), DT_FLOAT);
CHECK(TensorShapeUtils::IsScalar(tensor.shape()));
return tensor.scalar<float>()();
}
static uint64 kIncarnation = 1;
Rendezvous::ParsedKey Key(const string& sender, const uint64 incarnation,
const string& receiver, const string& name) {
Rendezvous::ParsedKey result;
CHECK(
Rendezvous::ParseKey(Rendezvous::CreateKey(sender, incarnation, receiver,
name, FrameAndIter(0, 0)),
&result)
.ok());
return result;
}
#define ALICE "/job:j/replica:0/task:0/cpu:0"
#define BOB "/job:j/replica:0/task:0/device:GPU:0"
TEST_F(ExecutorTest, SimpleAdd) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
auto in0 = test::graph::Recv(g.get(), "a", "float", ALICE, 1, BOB);
auto in1 = test::graph::Recv(g.get(), "b", "float", ALICE, 1, BOB);
auto tmp = test::graph::Add(g.get(), in0, in1);
test::graph::Send(g.get(), tmp, "c", BOB, 1, ALICE);
Create(std::move(g));
Rendezvous::Args args;
TF_ASSERT_OK(rendez_->Send(Key(ALICE, kIncarnation, BOB, "a"), args, V(1.0),
false));
TF_ASSERT_OK(rendez_->Send(Key(ALICE, kIncarnation, BOB, "b"), args, V(1.0),
false));
TF_ASSERT_OK(Run(rendez_));
Tensor out = V(-1);
bool is_dead = false;
TF_ASSERT_OK(
rendez_->Recv(Key(BOB, kIncarnation, ALICE, "c"), args, &out, &is_dead));
EXPECT_EQ(2.0, V(out));
}
TEST_F(ExecutorTest, SelfAdd) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
auto v = test::graph::Recv(g.get(), "a", "float", ALICE, 1, BOB);
const int N = 10;
for (int i = 1; i <= N; ++i) {
v = test::graph::Add(g.get(), v, v);
}
test::graph::Send(g.get(), v, "b", BOB, 1, ALICE);
Create(std::move(g));
Rendezvous::Args args;
TF_ASSERT_OK(
rendez_->Send(Key(ALICE, kIncarnation, BOB, "a"), args, V(1.0), false));
TF_ASSERT_OK(Run(rendez_));
Tensor out = V(-1);
bool is_dead = false;
TF_ASSERT_OK(
rendez_->Recv(Key(BOB, kIncarnation, ALICE, "b"), args, &out, &is_dead));
EXPECT_EQ(1024.0, V(out));
}
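// Builds a graph that fans the received input out into N Identity nodes and
// then sums them pairwise in random order before sending the result.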
void BuildTree(int N, Graph* g) {
CHECK_GT(N, 1);
auto in = test::graph::Recv(g, "a", "float", ALICE, 1, BOB);
std::vector<Node*> nodes;
int i = 0;
for (; i < N; ++i) {
nodes.push_back(test::graph::Identity(g, in, 0));
}
random::PhiloxRandom philox(testing::RandomSeed(), 17);
random::SimplePhilox rnd(&philox);
while (nodes.size() > 1) {
int x = rnd.Uniform(nodes.size());
auto in0 = nodes[x];
nodes[x] = nodes.back();
nodes.resize(nodes.size() - 1);
x = rnd.Uniform(nodes.size());
auto in1 = nodes[x];
nodes[x] = test::graph::Add(g, in0, in1);
}
test::graph::Send(g, nodes.back(), "b", BOB, 1, ALICE);
}
TEST_F(ExecutorTest, RandomTree) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
BuildTree(4096, g.get());
Create(std::move(g));
Rendezvous::Args args;
TF_ASSERT_OK(
rendez_->Send(Key(ALICE, kIncarnation, BOB, "a"), args, V(1.0), false));
TF_ASSERT_OK(Run(rendez_));
Tensor out = V(-1);
bool is_dead = false;
TF_ASSERT_OK(
rendez_->Recv(Key(BOB, kIncarnation, ALICE, "b"), args, &out, &is_dead));
EXPECT_EQ(4096.0, V(out));
}
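// Builds a graph in which 1024 Add/Assign pairs race on the same variable,
// all ordered after the initial assignment and before the final Send.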
void BuildConcurrentAddAssign(Graph* g) {
auto one = test::graph::Constant(g, V(1.0));
auto var = test::graph::Var(g, DT_FLOAT, TensorShape({}));
auto init = test::graph::Assign(g, var, one);
auto out = test::graph::Send(g, var, "out", ALICE, kIncarnation, BOB);
for (int i = 0; i < 1024; ++i) {
auto add = test::graph::Add(g, var, one);
g->AddControlEdge(init, add);
auto assign = test::graph::Assign(g, var, add);
g->AddControlEdge(assign, out);
}
}
#ifndef THREAD_SANITIZER
TEST_F(ExecutorTest, ConcurrentAddAssign) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
BuildConcurrentAddAssign(g.get());
Create(std::move(g));
for (int iters = 0; iters < 16; ++iters) {
Rendezvous* rendez = NewLocalRendezvous();
TF_ASSERT_OK(Run(rendez));
Rendezvous::Args args;
Tensor out;
bool is_dead;
TF_ASSERT_OK(rendez->Recv(Key(ALICE, kIncarnation, BOB, "out"), args, &out,
&is_dead));
VLOG(1) << "Get " << V(out);
EXPECT_LE(V(out), 1025.0);
rendez->Unref();
}
}
#endif
TEST_F(ExecutorTest, SimpleSwitchLive) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
auto in0 = test::graph::Recv(g.get(), "a", "float", ALICE, 1, BOB);
auto in1 = test::graph::Constant(g.get(), VB(false));
auto tmp = test::graph::Switch(g.get(), in0, in1);
test::graph::Send(g.get(), tmp, "c", BOB, 1, ALICE);
Create(std::move(g));
Rendezvous::Args args;
TF_ASSERT_OK(rendez_->Send(Key(ALICE, kIncarnation, BOB, "a"), args, V(1.0),
false));
TF_ASSERT_OK(Run(rendez_));
Tensor out = V(-1);
bool is_dead = false;
TF_ASSERT_OK(
rendez_->Recv(Key(BOB, kIncarnation, ALICE, "c"), args, &out, &is_dead));
EXPECT_EQ(1.0, V(out));
EXPECT_FALSE(is_dead);
}
TEST_F(ExecutorTest, SimpleSwitchDead) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
auto in0 = test::graph::Recv(g.get(), "a", "float", ALICE, 1, BOB);
auto in1 = test::graph::Constant(g.get(), VB(true));
auto tmp = test::graph::Switch(g.get(), in0, in1);
test::graph::Send(g.get(), tmp, "c", BOB, 1, ALICE);
Create(std::move(g));
Rendezvous::Args args;
TF_ASSERT_OK(rendez_->Send(Key(ALICE, kIncarnation, BOB, "a"), args, V(1.0),
false));
TF_ASSERT_OK(Run(rendez_));
Tensor out = V(-1);
bool is_dead = false;
TF_ASSERT_OK(
rendez_->Recv(Key(BOB, kIncarnation, ALICE, "c"), args, &out, &is_dead));
EXPECT_TRUE(is_dead);
}
TEST_F(ExecutorTest, Abort) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
auto in0 = test::graph::Recv(g.get(), "a", "float", ALICE, 1, BOB);
auto in1 = test::graph::Recv(g.get(), "b", "float", ALICE, 1, BOB);
auto in2 = test::graph::Recv(g.get(), "c", "float", ALICE, 1, BOB);
auto in3 = test::graph::Recv(g.get(), "d", "float", ALICE, 1, BOB);
auto add0 = test::graph::Add(g.get(), in0, in1);
auto add1 = test::graph::Add(g.get(), in2, in3);
auto add2 = test::graph::Add(g.get(), add0, add1);
test::graph::Send(g.get(), add2, "e", BOB, 1, ALICE);
Create(std::move(g));
rendez_->Ref();
SchedClosure([this]() {
Env::Default()->SleepForMicroseconds(100 * 1000);
Status s = rendez_->Send(Key(ALICE, kIncarnation, BOB, "a"),
Rendezvous::Args(), V(1.0), false);
rendez_->Unref();
});
rendez_->Ref();
SchedClosure([this]() {
Env::Default()->SleepForMicroseconds(100 * 1000);
Status s = rendez_->Send(Key(ALICE, kIncarnation, BOB, "b"),
Rendezvous::Args(), V(1.0), false);
rendez_->Unref();
});
rendez_->Ref();
SchedClosure([this]() {
Env::Default()->SleepForMicroseconds(100 * 1000);
Status s = rendez_->Send(Key(ALICE, kIncarnation, BOB, "c"),
Rendezvous::Args(), V(1.0), false);
rendez_->Unref();
});
rendez_->Ref();
SchedClosure([this]() {
Env::Default()->SleepForMicroseconds(100 * 1000);
rendez_->StartAbort(errors::Aborted(""));
rendez_->Unref();
});
EXPECT_TRUE(errors::IsAborted(Run(rendez_)));
Tensor out = V(-1);
bool is_dead = false;
EXPECT_TRUE(errors::IsAborted(rendez_->Recv(
Key(BOB, kIncarnation, ALICE, "c"), Rendezvous::Args(), &out, &is_dead)));
}
TEST_F(ExecutorTest, RecvInvalidDtype) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
auto one = test::graph::Recv(g.get(), "one", "float", ALICE, 1, BOB);
auto var = test::graph::Var(g.get(), DT_FLOAT, TensorShape({1}));
auto init = test::graph::Assign(g.get(), var, one);
auto* two = test::graph::Send(g.get(), var, "two", BOB, 1, ALICE);
g->AddControlEdge(init, two);
Create(std::move(g));
Rendezvous* rendez = NewLocalRendezvous();
TF_ASSERT_OK(rendez->Send(Key(ALICE, 1, BOB, "one"), Rendezvous::Args(),
VD(1.0), false));
EXPECT_TRUE(errors::IsInternal(Run(rendez)));
Tensor output;
bool is_dead;
EXPECT_TRUE(errors::IsInternal(rendez->Recv(
Key(BOB, 1, ALICE, "two"), Rendezvous::Args(), &output, &is_dead)));
rendez->Unref();
}
TEST_F(ExecutorTest, RecvInvalidRefDtype) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
auto var = test::graph::InvalidRefType(g.get(), DT_FLOAT, DT_DOUBLE);
test::graph::Send(g.get(), var, "out", BOB, 1, ALICE);
Create(std::move(g));
Rendezvous* rendez = NewLocalRendezvous();
EXPECT_TRUE(errors::IsInternal(Run(rendez)));
Tensor output;
bool is_dead;
EXPECT_TRUE(errors::IsInternal(rendez->Recv(
Key(BOB, 1, ALICE, "out"), Rendezvous::Args(), &output, &is_dead)));
rendez->Unref();
}
TEST_F(ExecutorTest, NoInputTensors) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
test::graph::Constant(g.get(), V(1.0));
Create(std::move(g));
TF_ASSERT_OK(Run(rendez_));
}
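// Benchmarks executor overhead on a randomly generated layered graph of NoOp
// nodes with the given width and depth.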
static void BM_executor(::testing::benchmark::State& state) {
const int width = state.range(0);
const int depth = state.range(1);
Graph* g = new Graph(OpRegistry::Global());
random::PhiloxRandom philox(1729, 17);
random::SimplePhilox rand(&philox);
uint64 cur = 0;
uint32 r = 1 + rand.Rand32() % width;
std::vector<Node*> ready_nodes;
for (int i = 0; i < r; ++i) {
ready_nodes.push_back(test::graph::NoOp(g, {}));
++cur;
}
std::random_device random_device;
std::mt19937 rng(random_device());
for (int i = 0; i < depth; ++i) {
std::shuffle(ready_nodes.begin(), ready_nodes.end(), rng);
r = 1 + rand.Rand32() % (ready_nodes.size());
std::vector<Node*> control_inputs;
for (int j = 0; j < r; ++j) {
control_inputs.push_back(ready_nodes.back());
ready_nodes.pop_back();
}
Node* n = test::graph::NoOp(g, control_inputs);
++cur;
r = 1 + rand.Rand32() % width;
for (int j = 0; j < r; ++j) {
ready_nodes.push_back(test::graph::NoOp(g, {n}));
++cur;
}
}
FixupSourceAndSinkEdges(g);
test::Benchmark("cpu", g, false).Run(state);
state.SetLabel(strings::StrCat("Nodes = ", cur));
state.SetItemsProcessed(cur * static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_executor)->UseRealTime()->ArgPair(16, 1024);
BENCHMARK(BM_executor)->UseRealTime()->ArgPair(32, 8192);
BENCHMARK(BM_executor)->UseRealTime()->ArgPair(1024, 16);
BENCHMARK(BM_executor)->UseRealTime()->ArgPair(8192, 32);
BENCHMARK(BM_executor)->UseRealTime()->ArgPair(1024, 1024);
static void BM_const_identity(::testing::benchmark::State& state) {
const int width = state.range(0);
const int outputs_per_const = state.range(1);
Graph* g = new Graph(OpRegistry::Global());
for (int i = 0; i < width; ++i) {
Tensor i_t(i);
Node* const_node = test::graph::Constant(g, i_t);
for (int j = 0; j < outputs_per_const; ++j) {
test::graph::Identity(g, const_node);
}
}
FixupSourceAndSinkEdges(g);
test::Benchmark("cpu", g, false).Run(state);
state.SetLabel(strings::StrCat("Nodes = ", (1 + outputs_per_const) * width));
state.SetItemsProcessed((1 + outputs_per_const) * width *
static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_const_identity)
->UseRealTime()
->ArgPair(1, 1)
->ArgPair(1, 100)
->ArgPair(100, 1)
->ArgPair(100, 100);
static void BM_FeedInputFetchOutput(::testing::benchmark::State& state) {
Graph* g = new Graph(OpRegistry::Global());
Node* x = test::graph::Recv(g, "x", "float", ALICE, 1, BOB);
Node* y = test::graph::Recv(g, "y", "float", ALICE, 1, BOB);
Node* sum = test::graph::Add(g, x, y);
Node* z = test::graph::Send(g, sum, "z", BOB, 1, ALICE);
string x_key = test::GetRendezvousKey(x);
string y_key = test::GetRendezvousKey(y);
string z_key = test::GetRendezvousKey(z);
Tensor val(DT_FLOAT, TensorShape({}));
val.scalar<float>()() = 3.14;
FixupSourceAndSinkEdges(g);
test::Benchmark("cpu", g, false)
.RunWithRendezvousArgs({{x_key, val}, {y_key, val}}, {z_key}, state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_FeedInputFetchOutput);
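// Replaces a data edge with a _Send/_Recv pair using the given rendezvous
// key, adding a control edge so the Recv still runs after the original
// producer.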
Status ReplaceEdgeWithSendRecv(Graph* g, const Edge* edge, const string& tensor,
const string& sender,
const uint64 sender_incarnation,
const string& receiver) {
Node* send;
NodeDef send_def;
TF_CHECK_OK(NodeDefBuilder(g->NewName("n"), "_Send")
.Input(edge->src()->name(), edge->src_output(),
edge->src()->output_type(edge->src_output()))
.Attr("tensor_name", tensor)
.Attr("send_device", sender)
.Attr("send_device_incarnation",
static_cast<int64_t>(sender_incarnation))
.Attr("recv_device", receiver)
.Finalize(&send_def));
TF_ASSIGN_OR_RETURN(send, g->AddNode(send_def));
Node* recv;
NodeDef recv_def;
TF_CHECK_OK(
NodeDefBuilder(g->NewName("n"), "_Recv")
.Attr("tensor_name", tensor)
.Attr("send_device", sender)
.Attr("send_device_incarnation",
static_cast<int64_t>(sender_incarnation))
.Attr("recv_device", receiver)
.Attr("tensor_type", edge->dst()->input_type(edge->dst_input()))
.Finalize(&recv_def));
TF_ASSIGN_OR_RETURN(recv, g->AddNode(recv_def));
g->AddEdge(edge->src(), edge->src_output(), send, 0);
g->AddEdge(recv, 0, edge->dst(), edge->dst_input());
g->AddControlEdge(edge->src(), recv);
g->RemoveEdge(edge);
return absl::OkStatus();
}
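// Builds a While loop with `loop_vars` int32 loop variables running
// `loop_iters` iterations; optionally lowers it to Switch/Merge form and
// optionally rewrites the LoopCond->Switch edges into Send/Recv transfers.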
static void BM_WhileLoopHelper(::testing::benchmark::State& state,
int loop_iters, int loop_vars, bool lower,
bool transfer) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
const Tensor one_t = test::AsScalar<int32>(1);
std::vector<string> args;
args.reserve(loop_vars);
args.push_back("x: int32");
for (int i = 1; i < loop_vars; ++i) {
args.push_back(strings::StrCat("x", i, ": int32"));
}
std::vector<string> body_rets;
body_rets.reserve(loop_vars);
body_rets.push_back("y: int32");
for (int i = 1; i < loop_vars; ++i) {
body_rets.push_back(strings::StrCat("y", i, ": int32"));
}
std::vector<FunctionDefHelper::Node> body_nodes;
body_nodes.reserve(1 + loop_vars);
body_nodes.push_back(
{{"one"}, "Const", {}, {{"value", one_t}, {"dtype", DT_INT32}}});
body_nodes.push_back({{"y"}, "Add", {"x", "one"}, {{"T", DT_INT32}}});
for (int i = 1; i < loop_vars; ++i) {
body_nodes.push_back({{strings::StrCat("y", i)},
"Relu",
{strings::StrCat("x", i)},
{{"T", DT_INT32}}});
}
*f_lib_proto.add_function() = FunctionDefHelper::Define(
"XPlusOne",
args,
body_rets,
{},
body_nodes);
const Tensor loop_iters_t = test::AsScalar<int32>(loop_iters);
*f_lib_proto.add_function() = FunctionDefHelper::Define(
"LessThanOrEqualToN",
args,
{"z: bool"},
{},
{
{{"N"}, "Const", {}, {{"value", loop_iters_t}, {"dtype", DT_INT32}}},
{{"z"}, "LessEqual", {"x", "N"}, {{"T", DT_INT32}}},
});
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Const(root.WithOpName("A"), 0, {});
Node* while_node;
std::vector<NodeBuilder::NodeOut> inputs;
std::vector<DataType> input_types(loop_vars, DT_INT32);
inputs.reserve(loop_vars);
for (int i = 0; i < loop_vars; ++i) {
inputs.push_back(NodeBuilder::NodeOut(a.node()));
}
AttrValue int32_attr;
int32_attr.set_type(DT_INT32);
AttrValue cond_func;
cond_func.mutable_func()->set_name("LessThanOrEqualToN");
AttrValue body_func;
body_func.mutable_func()->set_name("XPlusOne");
TF_ASSERT_OK(
NodeBuilder("while", "While", &root.graph()->flib_def())
.Input(inputs)
.Attr("T", input_types)
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr("parallel_iterations", 20)
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Finalize(root.graph(), &while_node));
auto c = ops::Identity(
root.WithOpName("C").WithControlDependencies(Output(while_node)),
Output(while_node));
TF_ASSERT_OK(root.DoShapeInference(while_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
if (lower) {
FunctionLibraryDefinition flib_def(graph->flib_def());
GraphOptimizationPassOptions opt_options;
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_do_function_inlining(true);
opt_options.session_options = &session_options;
opt_options.graph = &graph;
opt_options.flib_def = &flib_def;
LowerFunctionalOpsPass pass;
TF_ASSERT_OK(pass.Run(opt_options));
if (transfer) {
for (Node* node : graph->nodes()) {
if (node->type_string() != "LoopCond") {
continue;
}
for (const Edge* edge : node->out_edges()) {
if (edge->dst()->type_string() != "Switch") {
continue;
}
string tensor_name = strings::StrCat("c", edge->id());
TF_ASSERT_OK(ReplaceEdgeWithSendRecv(graph.get(), edge, tensor_name,
BOB, 1, ALICE));
}
}
}
}
SessionOptions options;
options.config.set_inter_op_parallelism_threads(4);
FixupSourceAndSinkEdges(graph.get());
test::Benchmark("cpu", graph.release(), &options, nullptr, nullptr, "",
false)
.Run(state);
}
static void BM_LoweredWhileLoop(::testing::benchmark::State& state) {
const int loop_iters = state.range(0);
const int loop_vars = state.range(1);
BM_WhileLoopHelper(state, loop_iters, loop_vars, true,
false);
}
BENCHMARK(BM_LoweredWhileLoop)
->ArgPair(0, 1)
->ArgPair(1, 1)
->ArgPair(10, 1)
->ArgPair(100, 1)
->ArgPair(1000, 1)
->ArgPair(0, 100)
->ArgPair(1, 100)
->ArgPair(10, 100)
->ArgPair(100, 100)
->ArgPair(1000, 100);
static void BM_LoweredWhileLoopWithTransfer(
::testing::benchmark::State& state) {
const int loop_iters = state.range(0);
const int loop_vars = state.range(1);
BM_WhileLoopHelper(state, loop_iters, loop_vars, true,
true);
}
BENCHMARK(BM_LoweredWhileLoopWithTransfer)
->ArgPair(0, 100)
->ArgPair(1, 100)
->ArgPair(10, 100)
->ArgPair(100, 100)
->ArgPair(1000, 100)
->ArgPair(1, 5000)
->ArgPair(10, 5000)
->ArgPair(100, 5000)
->ArgPair(1000, 5000);
static void BM_FunctionalWhileLoop(::testing::benchmark::State& state) {
const int loop_iters = state.range(0);
const int loop_vars = state.range(1);
BM_WhileLoopHelper(state, loop_iters, loop_vars, false,
false);
}
BENCHMARK(BM_FunctionalWhileLoop)
->ArgPair(0, 1)
->ArgPair(1, 1)
->ArgPair(10, 1)
->ArgPair(100, 1)
->ArgPair(1000, 1)
->ArgPair(0, 100)
->ArgPair(1, 100)
->ArgPair(10, 100)
->ArgPair(100, 100)
->ArgPair(1000, 100);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/executor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/executor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0ef74f11-3523-4202-9791-2c1beb9227af | cpp | tensorflow/tensorflow | grpc_worker_cache | tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.cc | tensorflow/core/distributed_runtime/rpc/grpc_worker_cache_test.cc | #include "tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.h"
#include "tensorflow/core/distributed_runtime/rpc/coordination/grpc_coordination_client.h"
#include "tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/distributed_runtime/worker_cache_logger.h"
#include "tensorflow/core/distributed_runtime/worker_cache_partial.h"
#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
namespace {
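// WorkerCache that returns the in-process worker for the local target and
// creates gRPC remote workers for all other targets, pinning each target to a
// single completion queue.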
class GrpcWorkerCache : public WorkerCachePartial {
public:
explicit GrpcWorkerCache(std::shared_ptr<GrpcChannelCache> channel_cache,
WorkerInterface* local_worker,
const string& local_target,
GrpcWorkerEnv* worker_env)
: local_target_(local_target),
local_worker_(local_worker),
channel_cache_(channel_cache),
worker_env_(worker_env),
next_round_robin_assignment_(0) {}
void ListWorkers(std::vector<string>* workers) const override {
channel_cache_->ListWorkers(workers);
}
void ListWorkersInJob(const string& job_name,
std::vector<string>* workers) const override {
channel_cache_->ListWorkersInJob(job_name, workers);
}
WorkerInterface* GetOrCreateWorker(const string& target) override {
if (target == local_target_) {
return local_worker_;
} else {
SharedGrpcChannelPtr channel = channel_cache_->FindWorkerChannel(target);
if (!channel) {
return nullptr;
}
size_t index = AssignWorkerToThread(target);
return NewGrpcRemoteWorker(
channel, worker_env_->GetCompletionQueue(index),
worker_env_->GetThreadPool(), &logger_, target);
}
}
void ReleaseWorker(const string& target, WorkerInterface* worker) override {
if (target == local_target_) {
CHECK_EQ(worker, local_worker_)
<< "Releasing a worker that was not returned by this WorkerCache";
} else {
WorkerCacheInterface::ReleaseWorker(target, worker);
}
}
Status GetEagerClientCache(
std::unique_ptr<eager::EagerClientCache>* eager_client_cache) override {
eager_client_cache->reset(eager::NewGrpcEagerClientCache(channel_cache_));
return absl::OkStatus();
}
Status GetCoordinationClientCache(std::unique_ptr<CoordinationClientCache>*
coordination_client_cache) override {
coordination_client_cache->reset(
NewGrpcCoordinationClientCache(channel_cache_));
return absl::OkStatus();
}
void SetLogging(bool v) override { logger_.SetLogging(v); }
void ClearLogs() override { logger_.ClearLogs(); }
bool RetrieveLogs(int64_t step_id, StepStats* ss) override {
return logger_.RetrieveLogs(step_id, ss);
}
private:
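  // Assigns each target to a completion queue in round-robin order so that
  // all RPCs to that target are polled by the same thread.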
size_t AssignWorkerToThread(const string& target) {
mutex_lock lock(assignment_mu_);
auto it = target_assignments_.find(target);
if (it == target_assignments_.end()) {
it = target_assignments_
.insert(std::make_pair(target,
(next_round_robin_assignment_++) %
worker_env_->CompletionQueueSize()))
.first;
}
return it->second;
}
const string local_target_;
WorkerInterface* const local_worker_;
std::shared_ptr<GrpcChannelCache> channel_cache_;
WorkerCacheLogger logger_;
GrpcWorkerEnv* worker_env_;
mutex assignment_mu_;
std::unordered_map<std::string, size_t> target_assignments_
TF_GUARDED_BY(assignment_mu_);
size_t next_round_robin_assignment_ TF_GUARDED_BY(assignment_mu_);
};
}
GrpcWorkerEnv::GrpcWorkerEnv(size_t num_completion_queues, size_t num_threads)
: threadpool_(new thread::ThreadPool(
Env::Default(), ThreadOptions(), "GrpcWorkerEnvQueues", num_threads,
false, nullptr)),
threads_(num_completion_queues) {}
GrpcWorkerEnv::~GrpcWorkerEnv() { threads_.clear(); }
GrpcWorkerEnv::GrpcWorkerCacheThread::GrpcWorkerCacheThread() {
thread_.reset(Env::Default()->StartThread(
ThreadOptions(), "GrpcWorkerEnvPool", [this]() {
void* tag;
bool ok;
while (completion_queue_.Next(&tag, &ok)) {
GrpcClientCQTag* callback_tag = static_cast<GrpcClientCQTag*>(tag);
callback_tag->OnCompleted(ok);
}
}));
}
GrpcWorkerEnv::GrpcWorkerCacheThread::~GrpcWorkerCacheThread() {
completion_queue_.Shutdown();
thread_.reset();
}
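// Creates the shared gRPC worker environment, sizing the completion queues
// and thread pool from TF_GRPC_WORKER_CACHE_QUEUES and
// TF_GRPC_WORKER_CACHE_THREADS (defaults: 64 queues, one thread per CPU).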
GrpcWorkerEnv* CreateGrpcWorkerEnv() {
int num_cpus = port::NumSchedulableCPUs();
int64_t num_completion_queues;
Status status = ReadInt64FromEnvVar("TF_GRPC_WORKER_CACHE_QUEUES", 64,
&num_completion_queues);
if (!status.ok()) {
LOG(ERROR) << "Error parsing TF_GRPC_WORKER_CACHE_QUEUES: " << status;
}
int64_t num_threads;
status = ReadInt64FromEnvVar("TF_GRPC_WORKER_CACHE_THREADS", num_cpus,
&num_threads);
if (!status.ok()) {
LOG(ERROR) << "Error parsing TF_GRPC_WORKER_CACHE_THREADS: " << status;
}
return new GrpcWorkerEnv(num_completion_queues, num_threads);
}
WorkerCacheInterface* NewGrpcWorkerCache(std::shared_ptr<GrpcChannelCache> cc,
GrpcWorkerEnv* worker_env) {
return new GrpcWorkerCache(cc, nullptr, "",
worker_env);
}
WorkerCacheInterface* NewGrpcWorkerCacheWithLocalWorker(
std::shared_ptr<GrpcChannelCache> cc, GrpcWorkerEnv* worker_env,
WorkerInterface* local_worker, const string& local_target) {
return new GrpcWorkerCache(cc, local_worker, local_target, worker_env);
}
} | #include "tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"
#include "tensorflow/core/distributed_runtime/test_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/threadpool.h"
namespace tensorflow {
TEST(GrpcWorkerCacheTest, NewGrpcWorkerCache) {
GrpcChannelSpec spec;
TF_ASSERT_OK(
spec.AddHostPortsJob("worker", {{0, "a:0"}, {1, "b:1"}, {2, "c:2"}}));
ChannelCreationFunction channel_func =
ConvertToChannelCreationFunction(NewHostPortGrpcChannel);
auto channel_cache = std::shared_ptr<GrpcChannelCache>(
NewGrpcChannelCache(spec, channel_func));
std::unique_ptr<GrpcWorkerEnv> grpc_worker_env(CreateGrpcWorkerEnv());
std::unique_ptr<WorkerCacheInterface> worker_cache(
NewGrpcWorkerCache(channel_cache, grpc_worker_env.get()));
WorkerInterface* wi;
wi = worker_cache->GetOrCreateWorker("/job:worker/replica:0/task:0");
EXPECT_NE(wi, nullptr);
worker_cache->ReleaseWorker("/job:worker/replica:0/task:0", wi);
wi = worker_cache->GetOrCreateWorker("/job:worker/replica:0/task:1");
EXPECT_NE(wi, nullptr);
worker_cache->ReleaseWorker("/job:worker/replica:0/task:1", wi);
wi = worker_cache->GetOrCreateWorker("/job:worker/replica:0/task:2");
EXPECT_NE(wi, nullptr);
worker_cache->ReleaseWorker("/job:worker/replica:0/task:2", wi);
wi = worker_cache->GetOrCreateWorker("/job:worker/replica:0/task:3");
EXPECT_EQ(wi, nullptr);
std::unique_ptr<TestWorkerInterface> local_wi;
worker_cache.reset(NewGrpcWorkerCacheWithLocalWorker(
channel_cache, grpc_worker_env.get(), local_wi.get(), "local_target"));
wi = worker_cache->GetOrCreateWorker("local_target");
EXPECT_EQ(wi, local_wi.get());
}
TEST(GrpcWorkerCacheTest, DestructWorkerCacheInThreadPool) {
GrpcChannelSpec spec;
TF_ASSERT_OK(
spec.AddHostPortsJob("worker", {{0, "a:0"}, {1, "b:1"}, {2, "c:2"}}));
ChannelCreationFunction channel_func =
ConvertToChannelCreationFunction(NewHostPortGrpcChannel);
auto channel_cache = std::shared_ptr<GrpcChannelCache>(
NewGrpcChannelCache(spec, channel_func));
std::unique_ptr<GrpcWorkerEnv> grpc_worker_env(CreateGrpcWorkerEnv());
WorkerCacheInterface* worker_cache =
NewGrpcWorkerCache(channel_cache, grpc_worker_env.get());
thread::ThreadPool* tp = grpc_worker_env->GetThreadPool();
Notification n;
tp->Schedule([worker_cache, &n] {
delete worker_cache;
n.Notify();
});
n.WaitForNotification();
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/grpc_worker_cache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0ea8a6b6-b2a9-416e-9b86-777aa27c03fe | cpp | google/cel-cpp | proto_time_encoding | internal/proto_time_encoding.cc | internal/proto_time_encoding_test.cc | #include "internal/proto_time_encoding.h"
#include <string>
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/timestamp.pb.h"
#include "google/protobuf/util/time_util.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "internal/status_macros.h"
#include "internal/time.h"
namespace cel::internal {
namespace {
absl::Status Validate(absl::Time time) {
if (time < cel::internal::MinTimestamp()) {
return absl::InvalidArgumentError("time below min");
}
if (time > cel::internal::MaxTimestamp()) {
return absl::InvalidArgumentError("time above max");
}
return absl::OkStatus();
}
absl::Status CelValidateDuration(absl::Duration duration) {
if (duration < cel::internal::MinDuration()) {
return absl::InvalidArgumentError("duration below min");
}
if (duration > cel::internal::MaxDuration()) {
return absl::InvalidArgumentError("duration above max");
}
return absl::OkStatus();
}
}
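// Decoding below performs no range checks; the Encode* functions validate
// against the CEL min/max bounds before converting.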
absl::Duration DecodeDuration(const google::protobuf::Duration& proto) {
return absl::Seconds(proto.seconds()) + absl::Nanoseconds(proto.nanos());
}
absl::Time DecodeTime(const google::protobuf::Timestamp& proto) {
return absl::FromUnixSeconds(proto.seconds()) +
absl::Nanoseconds(proto.nanos());
}
absl::Status EncodeDuration(absl::Duration duration,
google::protobuf::Duration* proto) {
CEL_RETURN_IF_ERROR(CelValidateDuration(duration));
const int64_t s = absl::IDivDuration(duration, absl::Seconds(1), &duration);
const int64_t n =
absl::IDivDuration(duration, absl::Nanoseconds(1), &duration);
proto->set_seconds(s);
proto->set_nanos(n);
return absl::OkStatus();
}
absl::StatusOr<std::string> EncodeDurationToString(absl::Duration duration) {
google::protobuf::Duration d;
auto status = EncodeDuration(duration, &d);
if (!status.ok()) {
return status;
}
return google::protobuf::util::TimeUtil::ToString(d);
}
absl::Status EncodeTime(absl::Time time, google::protobuf::Timestamp* proto) {
CEL_RETURN_IF_ERROR(Validate(time));
const int64_t s = absl::ToUnixSeconds(time);
proto->set_seconds(s);
proto->set_nanos((time - absl::FromUnixSeconds(s)) / absl::Nanoseconds(1));
return absl::OkStatus();
}
absl::StatusOr<std::string> EncodeTimeToString(absl::Time time) {
google::protobuf::Timestamp t;
auto status = EncodeTime(time, &t);
if (!status.ok()) {
return status;
}
return google::protobuf::util::TimeUtil::ToString(t);
}
} | #include "internal/proto_time_encoding.h"
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/timestamp.pb.h"
#include "absl/time/time.h"
#include "internal/testing.h"
#include "testutil/util.h"
namespace cel::internal {
namespace {
using ::google::api::expr::testutil::EqualsProto;
TEST(EncodeDuration, Basic) {
google::protobuf::Duration proto_duration;
ASSERT_OK(
EncodeDuration(absl::Seconds(2) + absl::Nanoseconds(3), &proto_duration));
EXPECT_THAT(proto_duration, EqualsProto("seconds: 2 nanos: 3"));
}
TEST(EncodeDurationToString, Basic) {
ASSERT_OK_AND_ASSIGN(
std::string json,
EncodeDurationToString(absl::Seconds(5) + absl::Nanoseconds(20)));
EXPECT_EQ(json, "5.000000020s");
}
TEST(EncodeTime, Basic) {
google::protobuf::Timestamp proto_timestamp;
ASSERT_OK(EncodeTime(absl::FromUnixMillis(300000), &proto_timestamp));
EXPECT_THAT(proto_timestamp, EqualsProto("seconds: 300"));
}
TEST(EncodeTimeToString, Basic) {
ASSERT_OK_AND_ASSIGN(std::string json,
EncodeTimeToString(absl::FromUnixMillis(80030)));
EXPECT_EQ(json, "1970-01-01T00:01:20.030Z");
}
TEST(DecodeDuration, Basic) {
google::protobuf::Duration proto_duration;
proto_duration.set_seconds(450);
proto_duration.set_nanos(4);
EXPECT_EQ(DecodeDuration(proto_duration),
absl::Seconds(450) + absl::Nanoseconds(4));
}
TEST(DecodeTime, Basic) {
google::protobuf::Timestamp proto_timestamp;
proto_timestamp.set_seconds(450);
EXPECT_EQ(DecodeTime(proto_timestamp), absl::FromUnixSeconds(450));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/proto_time_encoding.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/proto_time_encoding_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
c8984e03-8b10-44e4-81f8-3b0019417a42 | cpp | abseil/abseil-cpp | str_replace | absl/strings/str_replace.cc | absl/strings/str_replace_test.cc | #include "absl/strings/str_replace.h"
#include <cstddef>
#include <initializer_list>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/config.h"
#include "absl/base/nullability.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace strings_internal {
using FixedMapping =
std::initializer_list<std::pair<absl::string_view, absl::string_view>>;
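// Rewrites `s` into *result_ptr by applying the queued substitutions in order
// of occurrence; `subs` is kept sorted so that back() is always the earliest
// (and, on ties, longest) remaining match, and each pattern is re-searched
// past the point it was applied. Returns the number of substitutions made.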
int ApplySubstitutions(
absl::string_view s,
absl::Nonnull<std::vector<strings_internal::ViableSubstitution>*> subs_ptr,
absl::Nonnull<std::string*> result_ptr) {
auto& subs = *subs_ptr;
int substitutions = 0;
size_t pos = 0;
while (!subs.empty()) {
auto& sub = subs.back();
if (sub.offset >= pos) {
if (pos <= s.size()) {
StrAppend(result_ptr, s.substr(pos, sub.offset - pos), sub.replacement);
}
pos = sub.offset + sub.old.size();
substitutions += 1;
}
sub.offset = s.find(sub.old, pos);
if (sub.offset == s.npos) {
subs.pop_back();
} else {
size_t index = subs.size();
while (--index && subs[index - 1].OccursBefore(subs[index])) {
std::swap(subs[index], subs[index - 1]);
}
}
}
result_ptr->append(s.data() + pos, s.size() - pos);
return substitutions;
}
}
std::string StrReplaceAll(absl::string_view s,
strings_internal::FixedMapping replacements) {
return StrReplaceAll<strings_internal::FixedMapping>(s, replacements);
}
int StrReplaceAll(strings_internal::FixedMapping replacements,
absl::Nonnull<std::string*> target) {
return StrReplaceAll<strings_internal::FixedMapping>(replacements, target);
}
ABSL_NAMESPACE_END
} | #include "absl/strings/str_replace.h"
#include <list>
#include <map>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "gtest/gtest.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
TEST(StrReplaceAll, OneReplacement) {
std::string s;
s = absl::StrReplaceAll(s, {{"", ""}});
EXPECT_EQ(s, "");
s = absl::StrReplaceAll(s, {{"x", ""}});
EXPECT_EQ(s, "");
s = absl::StrReplaceAll(s, {{"", "y"}});
EXPECT_EQ(s, "");
s = absl::StrReplaceAll(s, {{"x", "y"}});
EXPECT_EQ(s, "");
s = absl::StrReplaceAll("abc", {{"", ""}});
EXPECT_EQ(s, "abc");
s = absl::StrReplaceAll("abc", {{"", "y"}});
EXPECT_EQ(s, "abc");
s = absl::StrReplaceAll("abc", {{"x", ""}});
EXPECT_EQ(s, "abc");
s = absl::StrReplaceAll("abc", {{"xyz", "123"}});
EXPECT_EQ(s, "abc");
s = absl::StrReplaceAll("abc", {{"abc", "xyz"}});
EXPECT_EQ(s, "xyz");
s = absl::StrReplaceAll("abc", {{"a", "x"}});
EXPECT_EQ(s, "xbc");
s = absl::StrReplaceAll("abc", {{"b", "x"}});
EXPECT_EQ(s, "axc");
s = absl::StrReplaceAll("abc", {{"c", "x"}});
EXPECT_EQ(s, "abx");
s = absl::StrReplaceAll("ababa", {{"a", "xxx"}});
EXPECT_EQ(s, "xxxbxxxbxxx");
s = absl::StrReplaceAll("ababa", {{"b", "xxx"}});
EXPECT_EQ(s, "axxxaxxxa");
s = absl::StrReplaceAll("aaabaaabaaa", {{"aaa", "x"}});
EXPECT_EQ(s, "xbxbx");
s = absl::StrReplaceAll("abbbabbba", {{"bbb", "x"}});
EXPECT_EQ(s, "axaxa");
s = absl::StrReplaceAll("aaa", {{"aa", "x"}});
EXPECT_EQ(s, "xa");
s = absl::StrReplaceAll("aaa", {{"aa", "a"}});
EXPECT_EQ(s, "aa");
}
TEST(StrReplaceAll, ManyReplacements) {
std::string s;
s = absl::StrReplaceAll("", {{"", ""}, {"x", ""}, {"", "y"}, {"x", "y"}});
EXPECT_EQ(s, "");
s = absl::StrReplaceAll("abc", {{"", ""}, {"", "y"}, {"x", ""}});
EXPECT_EQ(s, "abc");
s = absl::StrReplaceAll("abc", {{"a", "x"}, {"b", "y"}, {"c", "z"}});
EXPECT_EQ(s, "xyz");
s = absl::StrReplaceAll("zxy", {{"z", "x"}, {"x", "y"}, {"y", "z"}});
EXPECT_EQ(s, "xyz");
s = absl::StrReplaceAll("abc", {{"a", "x"}, {"ab", "xy"}, {"abc", "xyz"}});
EXPECT_EQ(s, "xyz");
s = absl::StrReplaceAll(
"Abc!", {{"a", "x"}, {"ab", "xy"}, {"b", "y"}, {"bc", "yz"}, {"c", "z"}});
EXPECT_EQ(s, "Ayz!");
s = absl::StrReplaceAll(
"Abc!",
{{"a", "x"}, {"ab", "xy"}, {"b", "y"}, {"bc!", "yz?"}, {"c!", "z;"}});
EXPECT_EQ(s, "Ayz?");
s = absl::StrReplaceAll("ababa", {{"a", "xxx"}, {"b", "XXXX"}});
EXPECT_EQ(s, "xxxXXXXxxxXXXXxxx");
s = absl::StrReplaceAll("aaa", {{"aa", "x"}, {"a", "X"}});
EXPECT_EQ(s, "xX");
s = absl::StrReplaceAll("aaa", {{"a", "X"}, {"aa", "x"}});
EXPECT_EQ(s, "xX");
s = absl::StrReplaceAll("the quick brown fox jumped over the lazy dogs",
{
{"brown", "box"},
{"dogs", "jugs"},
{"fox", "with"},
{"jumped", "five"},
{"over", "dozen"},
{"quick", "my"},
{"the", "pack"},
{"the lazy", "liquor"},
});
EXPECT_EQ(s, "pack my box with five dozen liquor jugs");
}
TEST(StrReplaceAll, ManyReplacementsInMap) {
std::map<const char *, const char *> replacements;
replacements["$who"] = "Bob";
replacements["$count"] = "5";
replacements["#Noun"] = "Apples";
std::string s = absl::StrReplaceAll("$who bought $count #Noun. Thanks $who!",
replacements);
EXPECT_EQ("Bob bought 5 Apples. Thanks Bob!", s);
}
TEST(StrReplaceAll, ReplacementsInPlace) {
std::string s = std::string("$who bought $count #Noun. Thanks $who!");
int count;
count = absl::StrReplaceAll({{"$count", absl::StrCat(5)},
{"$who", "Bob"},
{"#Noun", "Apples"}}, &s);
EXPECT_EQ(count, 4);
EXPECT_EQ("Bob bought 5 Apples. Thanks Bob!", s);
}
TEST(StrReplaceAll, ReplacementsInPlaceInMap) {
std::string s = std::string("$who bought $count #Noun. Thanks $who!");
std::map<absl::string_view, absl::string_view> replacements;
replacements["$who"] = "Bob";
replacements["$count"] = "5";
replacements["#Noun"] = "Apples";
int count;
count = absl::StrReplaceAll(replacements, &s);
EXPECT_EQ(count, 4);
EXPECT_EQ("Bob bought 5 Apples. Thanks Bob!", s);
}
struct Cont {
Cont() = default;
explicit Cont(absl::string_view src) : data(src) {}
absl::string_view data;
};
template <int index>
absl::string_view get(const Cont& c) {
auto splitter = absl::StrSplit(c.data, ':');
auto it = splitter.begin();
for (int i = 0; i < index; ++i) ++it;
return *it;
}
TEST(StrReplaceAll, VariableNumber) {
std::string s;
{
std::vector<std::pair<std::string, std::string>> replacements;
s = "abc";
EXPECT_EQ(0, absl::StrReplaceAll(replacements, &s));
EXPECT_EQ("abc", s);
s = "abc";
replacements.push_back({"a", "A"});
EXPECT_EQ(1, absl::StrReplaceAll(replacements, &s));
EXPECT_EQ("Abc", s);
s = "abc";
replacements.push_back({"b", "B"});
EXPECT_EQ(2, absl::StrReplaceAll(replacements, &s));
EXPECT_EQ("ABc", s);
s = "abc";
replacements.push_back({"d", "D"});
EXPECT_EQ(2, absl::StrReplaceAll(replacements, &s));
EXPECT_EQ("ABc", s);
EXPECT_EQ("ABcABc", absl::StrReplaceAll("abcabc", replacements));
}
{
std::map<const char*, const char*> replacements;
replacements["aa"] = "x";
replacements["a"] = "X";
s = "aaa";
EXPECT_EQ(2, absl::StrReplaceAll(replacements, &s));
EXPECT_EQ("xX", s);
EXPECT_EQ("xxX", absl::StrReplaceAll("aaaaa", replacements));
}
{
std::list<std::pair<absl::string_view, absl::string_view>> replacements = {
{"a", "x"}, {"b", "y"}, {"c", "z"}};
std::string s = absl::StrReplaceAll("abc", replacements);
EXPECT_EQ(s, "xyz");
}
{
using X = std::tuple<absl::string_view, std::string, int>;
std::vector<X> replacements(3);
replacements[0] = X{"a", "x", 1};
replacements[1] = X{"b", "y", 0};
replacements[2] = X{"c", "z", -1};
std::string s = absl::StrReplaceAll("abc", replacements);
EXPECT_EQ(s, "xyz");
}
{
std::vector<Cont> replacements(3);
replacements[0] = Cont{"a:x"};
replacements[1] = Cont{"b:y"};
replacements[2] = Cont{"c:z"};
std::string s = absl::StrReplaceAll("abc", replacements);
EXPECT_EQ(s, "xyz");
}
}
TEST(StrReplaceAll, Inplace) {
std::string s;
int reps;
s = "";
reps = absl::StrReplaceAll({{"", ""}, {"x", ""}, {"", "y"}, {"x", "y"}}, &s);
EXPECT_EQ(reps, 0);
EXPECT_EQ(s, "");
s = "abc";
reps = absl::StrReplaceAll({{"", ""}, {"", "y"}, {"x", ""}}, &s);
EXPECT_EQ(reps, 0);
EXPECT_EQ(s, "abc");
s = "abc";
reps = absl::StrReplaceAll({{"a", "x"}, {"b", "y"}, {"c", "z"}}, &s);
EXPECT_EQ(reps, 3);
EXPECT_EQ(s, "xyz");
s = "zxy";
reps = absl::StrReplaceAll({{"z", "x"}, {"x", "y"}, {"y", "z"}}, &s);
EXPECT_EQ(reps, 3);
EXPECT_EQ(s, "xyz");
s = "abc";
reps = absl::StrReplaceAll({{"a", "x"}, {"ab", "xy"}, {"abc", "xyz"}}, &s);
EXPECT_EQ(reps, 1);
EXPECT_EQ(s, "xyz");
s = "Abc!";
reps = absl::StrReplaceAll(
{{"a", "x"}, {"ab", "xy"}, {"b", "y"}, {"bc", "yz"}, {"c", "z"}}, &s);
EXPECT_EQ(reps, 1);
EXPECT_EQ(s, "Ayz!");
s = "Abc!";
reps = absl::StrReplaceAll(
{{"a", "x"}, {"ab", "xy"}, {"b", "y"}, {"bc!", "yz?"}, {"c!", "z;"}}, &s);
EXPECT_EQ(reps, 1);
EXPECT_EQ(s, "Ayz?");
s = "ababa";
reps = absl::StrReplaceAll({{"a", "xxx"}, {"b", "XXXX"}}, &s);
EXPECT_EQ(reps, 5);
EXPECT_EQ(s, "xxxXXXXxxxXXXXxxx");
s = "aaa";
reps = absl::StrReplaceAll({{"aa", "x"}, {"a", "X"}}, &s);
EXPECT_EQ(reps, 2);
EXPECT_EQ(s, "xX");
s = "aaa";
reps = absl::StrReplaceAll({{"a", "X"}, {"aa", "x"}}, &s);
EXPECT_EQ(reps, 2);
EXPECT_EQ(s, "xX");
s = "the quick brown fox jumped over the lazy dogs";
reps = absl::StrReplaceAll(
{
{"brown", "box"},
{"dogs", "jugs"},
{"fox", "with"},
{"jumped", "five"},
{"over", "dozen"},
{"quick", "my"},
{"the", "pack"},
{"the lazy", "liquor"},
},
&s);
EXPECT_EQ(reps, 8);
EXPECT_EQ(s, "pack my box with five dozen liquor jugs");
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/str_replace.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/str_replace_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
914cb876-e268-4338-974a-bdc42eb9ee1a | cpp | tensorflow/tensorflow | triton_tiling_propagation | third_party/xla/xla/service/gpu/triton_tiling_propagation.cc | third_party/xla/xla/service/gpu/triton_tiling_propagation_test.cc | #include "xla/service/gpu/triton_tiling_propagation.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <list>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/layout.h"
#include "xla/permutation_util.h"
#include "xla/service/gpu/fusions/triton/triton_support.h"
#include "xla/service/gpu/fusions/triton/triton_support_legacy.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
namespace {
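// Removes dimensions whose iteration spec is a single fragment of count 1, so
// trivial dimensions do not affect physical-equivalence comparisons.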
absl::flat_hash_map<int, TensorIterationSpec::DimIterationSpec>
FilterTrivialDims(
const absl::flat_hash_map<int, TensorIterationSpec::DimIterationSpec>&
dim_iter_specs) {
absl::flat_hash_map<int, TensorIterationSpec::DimIterationSpec>
non_trivial_dim_iteration_specs;
for (const auto& [dim, dim_spec] : dim_iter_specs) {
if (dim_spec.size() == 1 && dim_spec[0].count == 1) {
continue;
}
non_trivial_dim_iteration_specs[dim] = dim_spec;
}
return non_trivial_dim_iteration_specs;
}
}
const TensorIterationSpec::DimIterationSpec* TensorIterationSpec::Find(
const int dimension) const {
if (auto it = dim_iteration_specs_.find(dimension);
it != dim_iteration_specs_.end()) {
return &it->second;
}
return nullptr;
}
std::vector<int> TensorIterationSpec::GetDimensions() const {
std::vector<int> result;
result.reserve(dim_iteration_specs_.size());
for (const auto& [dim, _] : dim_iteration_specs_) {
result.push_back(dim);
}
return result;
}
bool TensorIterationSpec::IsPhysicallyEquivalent(
const TensorIterationSpec& other) const {
const absl::flat_hash_map<int, DimIterationSpec>
non_trivial_dim_iteration_specs = FilterTrivialDims(dim_iteration_specs_);
const absl::flat_hash_map<int, DimIterationSpec>
other_non_trivial_dim_iteration_specs =
FilterTrivialDims(other.dim_iteration_specs_);
if (non_trivial_dim_iteration_specs.size() !=
other_non_trivial_dim_iteration_specs.size()) {
return false;
}
for (const auto& pair : non_trivial_dim_iteration_specs) {
int dimension = pair.first;
const DimIterationSpec& dim_iter_spec = pair.second;
auto other_it = other_non_trivial_dim_iteration_specs.find(dimension);
if (other_it == other_non_trivial_dim_iteration_specs.end()) {
return false;
}
const DimIterationSpec& other_dim_iter_spec = other_it->second;
if (dim_iter_spec.size() != other_dim_iter_spec.size()) {
return false;
}
for (size_t i = 0; i < dim_iter_spec.size(); i++) {
if (!dim_iter_spec[i].IsPhysicallyEquivalent(other_dim_iter_spec[i])) {
return false;
}
}
}
return true;
}
std::string TensorIterationSpec::IterationSpecFragment::ToString() const {
return absl::StrCat("{stride=", stride, ", count=", count,
", slice_start=", slice_start,
", sliced_count=", sliced_count, ", subfragments=[",
absl::StrJoin(subfragments, ", "), "]}");
}
std::string TensorIterationSpec::ToString() const {
return absl::StrCat(
"{",
absl::StrJoin(dim_iteration_specs_, ", ",
[&](std::string* s, const auto& kv) {
absl::StrAppend(
s, kv.first, ": ", "[",
absl::StrJoin(kv.second, ", ",
[&](std::string* ss, const auto& v) {
absl::StrAppend(ss, v.ToString());
}),
"]");
}),
"}");
}
namespace triton_fusion {
using Fragment = DimensionOrder::Fragment;
using Fragments = DimensionOrder::Fragments;
using FragmentOrders = DimensionOrder::FragmentOrders;
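// Builds the initial dimension order of a dot operand or output by walking
// its layout minor-to-major; the split-K dimension is folded back into the
// contracting dimension it was split from.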
DimensionOrder DimensionOrder::FromDotOperandOrOutput(
const HloInstruction& hlo, const int split_k_dimension_index) {
DimensionOrder dim_order;
dim_order.tensor_fragments_order_.reserve(hlo.shape().rank());
for (const int i : hlo.shape().layout().minor_to_major()) {
int target_dim_number = i;
if (i == split_k_dimension_index) {
CHECK(!dim_order.tensor_fragments_order_.empty())
<< "The split-K batch dimension has be preceded by the contracting "
"dimension it originates from by construction.";
target_dim_number =
dim_order.tensor_fragments_order_.back().dst_dim_number();
}
dim_order.dim_fragments_orders_[target_dim_number].push_back(
dim_order.tensor_fragments_order_.size());
dim_order.tensor_fragments_order_.push_back(
Fragment{target_dim_number, hlo.shape().dimensions(i)});
}
return dim_order;
}
std::string DimensionOrder::Fragment::ToString() const {
return absl::StrCat(dst_dim_number_, ":", count_, ":", slice_start_, "-",
sliced_count_);
}
std::string DimensionOrder::ToString() const {
std::string ret = absl::StrJoin(tensor_fragments_order_, " - ",
[](std::string* out, const Fragment& f) {
absl::StrAppend(out, f.ToString(), " ");
});
absl::StrAppend(&ret, "|");
for (const auto& [dim, fragments] : dim_fragments_orders_) {
absl::StrAppend(&ret, dim, ":", absl::StrJoin(fragments, ","), " ");
}
return ret;
}
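// Lowers the fragment order into per-dimension strided iteration specs:
// strides accumulate minor-to-major, adjacent fragments of the same logical
// dimension are merged into a single spec fragment, and size-1 fragments are
// filtered out at the end.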
TensorIterationSpec DimensionOrder::ToTensorIterationSpec() const {
const Fragments& dim_fragments = TensorFragmentsOrder();
TensorIterationSpec tensor_spec;
int64_t accumulated_stride = 1;
int last_dim = -1;
for (int dim_order_index = 0; dim_order_index < dim_fragments.size();
++dim_order_index) {
const DimensionOrder::Fragment& fragment = dim_fragments[dim_order_index];
VLOG(6) << fragment.ToString();
TensorIterationSpec::DimIterationSpec& dim_spec =
tensor_spec[fragment.dst_dim_number()];
if (last_dim == fragment.dst_dim_number()) {
if (!dim_spec.empty() && !dim_spec.back().subfragments.empty() &&
dim_spec.back().subfragments.back() == 1) {
dim_spec.back().subfragments.pop_back();
}
if (fragment.full_count() > 1) {
CHECK(!dim_spec.empty());
CHECK(!dim_spec.back().is_sliced())
<< "Only the major-most fragment can have an offset.";
dim_spec.back().slice_start =
fragment.slice_start() * dim_spec.back().count;
dim_spec.back().sliced_count =
fragment.sliced_count() * dim_spec.back().count;
dim_spec.back().count *= fragment.full_count();
dim_spec.back().subfragments.push_back(fragment.sliced_count());
}
} else {
dim_spec.push_back(TensorIterationSpec::IterationSpecFragment{
accumulated_stride,
fragment.full_count(),
fragment.slice_start(),
fragment.sliced_count(),
{fragment.sliced_count()}});
}
accumulated_stride *= fragment.full_count();
last_dim = fragment.dst_dim_number();
}
for (int dim_idx : tensor_spec.GetDimensions()) {
TensorIterationSpec::DimIterationSpec& dim_spec = tensor_spec[dim_idx];
if (dim_spec.size() <= 1) continue;
TensorIterationSpec::DimIterationSpec filtered_dim_spec;
absl::c_copy_if(dim_spec, std::back_inserter(filtered_dim_spec),
[](const TensorIterationSpec::IterationSpecFragment& f) {
return f.count != 1;
});
tensor_spec[dim_idx] = filtered_dim_spec;
}
tensor_spec.RemoveEmptyDimensions();
return tensor_spec;
}
namespace {
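// Returns the logical index of the dimension of `shape` covered by fragments
// labeled `label` in `dim_order`, or nullopt if no such dimension exists.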
std::optional<int> LogicalIndexOfLabeledDimension(
const Shape& shape, const DimensionOrder& dim_order, const int label) {
auto fragment_it = dim_order.TensorFragmentsOrder().cbegin();
for (int dim : shape.layout().minor_to_major()) {
const int64_t dim_size = shape.dimensions()[dim];
int64_t fragments_size = 1;
while (fragments_size < dim_size) {
fragments_size *= fragment_it->full_count();
if (fragment_it->dst_dim_number() == label) {
return dim;
}
++fragment_it;
}
}
return std::nullopt;
}
using Int64OrError = std::variant<int64_t, FusionDecision>;
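// Merges two major-part size requirements for the splittable dimension: equal
// values or kNoSplitRequirement combine; conflicting sizes are rejected.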
Int64OrError CombineSplitDimMajorPartSizeReqs(int64_t a, int64_t b) {
if (a == b || b == kNoSplitRequirement) {
return a;
}
if (a == kNoSplitRequirement) {
return b;
}
return FusionDecision::Forbid("Conflicting splits of splittable dimension");
}
}
DotRequirementsOrError CombineDotRequirements(
DotRequirements a, DotRequirementsOrError b_or_error) {
if (std::holds_alternative<FusionDecision>(b_or_error)) {
return b_or_error;
}
const DotRequirements& b = std::get<DotRequirements>(b_or_error);
Int64OrError combined_size_req =
CombineSplitDimMajorPartSizeReqs(a.splittable_dimension_major_part_size,
b.splittable_dimension_major_part_size);
if (std::holds_alternative<FusionDecision>(combined_size_req)) {
return std::get<FusionDecision>(combined_size_req);
}
return DotRequirements(std::get<int64_t>(combined_size_req));
}
namespace {
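// Checks that `order` is supported: within each dimension only the major-most
// fragment may be sliced, fragments must not be transposed, and only the
// designated splittable dimension may be split (into at most two groups).
// Returns the implied size of the split dimension's major part.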
DotRequirementsOrError GetRequirementsIfSupportedOrder(
const DimensionOrder& order, const DotProperties& properties) {
VLOG(8) << order.ToString();
int64_t split_dim_major_part = kNoSplitRequirement;
const Fragments& tensor_dim_fragments = order.TensorFragmentsOrder();
for (const auto& [dim_index, dim_fragments] : order.DimFragmentsOrders()) {
CHECK(!dim_fragments.empty());
for (int i = 0; i < dim_fragments.size() - 1; ++i) {
if (tensor_dim_fragments[dim_fragments[i]].is_sliced()) {
return FusionDecision::Forbid("Sliced non-major-most fragment.");
}
}
int group_counter = 0;
int last_seen_group_last_fragment_index = -1;
auto fragment_it = dim_fragments.cbegin();
while (true) {
if (fragment_it == dim_fragments.cend()) {
break;
}
int64_t grouped_size = tensor_dim_fragments[*fragment_it].full_count();
while ((fragment_it + 1) != dim_fragments.cend() &&
*(fragment_it + 1) == *fragment_it + 1) {
++fragment_it;
grouped_size *= tensor_dim_fragments[*fragment_it].full_count();
}
if (grouped_size == 1) {
++fragment_it;
continue;
}
if (last_seen_group_last_fragment_index > *fragment_it) {
return FusionDecision::Forbid("Transpose within a dimension.");
}
++group_counter;
if (group_counter > 1) {
const int splittable_dimension_index =
properties.splittable_dimension_index;
if (dim_index == splittable_dimension_index) {
if (group_counter == 2) {
if (split_dim_major_part != kNoSplitRequirement &&
split_dim_major_part != grouped_size) {
return FusionDecision::Forbid(
"Conflicting splits of splittable dimension");
}
split_dim_major_part = grouped_size;
} else if (group_counter > 2) {
return FusionDecision::Forbid(
"2nd split of a splittable dimension.");
}
} else {
return FusionDecision::Forbid("Unsupported split of a dimension.");
}
}
last_seen_group_last_fragment_index = *fragment_it;
++fragment_it;
}
}
return DotRequirements(split_dim_major_part);
}
DotRequirementsOrError GetRequirementsIfSupportedOrders(
const HloInstruction& hlo, const DimOrderMap& dim_orders,
const DotProperties& properties) {
const DotRequirements empty_requirements(kNoSplitRequirement);
auto get_requirements =
[&](const HloInstruction& instr) -> DotRequirementsOrError {
if (auto it = dim_orders.find(&instr); it != dim_orders.end()) {
return GetRequirementsIfSupportedOrder(it->second, properties);
}
return empty_requirements;
};
DotRequirements requirements = empty_requirements;
for (const HloInstruction* operand : hlo.operands()) {
DotRequirementsOrError requirements_or_error =
CombineDotRequirements(requirements, get_requirements(*operand));
if (std::holds_alternative<FusionDecision>(requirements_or_error)) {
return requirements_or_error;
}
requirements = std::get<DotRequirements>(requirements_or_error);
}
return CombineDotRequirements(requirements, get_requirements(hlo));
}
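// Elementwise ops leave the dimension order unchanged: propagate
// `src_dim_order` to every operand (output-to-input) or to the op itself.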
DimOrderMap GetPropagatedDimOrdersForElementwise(
const HloInstruction& hlo, TransformDirection direction,
const DimensionOrder& src_dim_order) {
if (direction == TransformDirection::kOutputToInput) {
DimOrderMap map;
for (const HloInstruction* operand : hlo.operands()) {
map.insert({operand, src_dim_order});
}
return map;
}
return {{&hlo, src_dim_order}};
}
const HloInstruction& GetSourceHlo(const HloInstruction& hlo,
TransformDirection direction) {
CHECK_GE(hlo.operand_count(), 1);
if (direction == TransformDirection::kOutputToInput) {
return hlo;
}
return *hlo.operand(0);
}
using ConstInstructionVector = absl::InlinedVector<const HloInstruction*, 2>;
ConstInstructionVector GetDestHlos(const HloInstruction& hlo,
TransformDirection direction) {
if (direction == TransformDirection::kInputToOutput) {
return {&hlo};
}
ConstInstructionVector hlos;
hlos.reserve(hlo.operands().size());
for (const HloInstruction* operand : hlo.operands()) {
hlos.push_back(operand);
}
return hlos;
}
const HloInstruction& GetDestHlo(const HloInstruction& hlo,
TransformDirection direction) {
CHECK_EQ(hlo.operand_count(), 1);
if (direction == TransformDirection::kInputToOutput) {
return hlo;
}
return *hlo.operand(0);
}
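// Propagates a dimension order through a bitcast by carrying over or splitting
// source fragments so that they exactly tile the destination shape; uneven
// splits and splits of sliced fragments are rejected.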
DimOrderMapOrError GetPropagatedDimOrdersForBitcast(
const HloInstruction& hlo, const TransformDirection direction,
const DimensionOrder& src_dim_order, const DotProperties& properties) {
const HloInstruction& dst = GetDestHlo(hlo, direction);
const Shape& dst_shape = dst.shape();
const Fragments& src_fragments_order = src_dim_order.TensorFragmentsOrder();
DimOrderMap dst_dim_orders;
DimensionOrder& dst_dim_order =
dst_dim_orders.insert({&dst, DimensionOrder()}).first->second;
Fragments& dst_fragments_order = dst_dim_order.TensorFragmentsOrder();
int64_t dst_remaining_size = 1;
absl::flat_hash_map<const Fragment*, std::vector<int>> src_to_dst;
auto dst_dim_it = dst_shape.layout().minor_to_major().cbegin();
const auto dst_dim_end = dst_shape.layout().minor_to_major().cend();
for (auto src_dim = src_fragments_order.cbegin();
src_dim != src_fragments_order.cend(); ++src_dim) {
auto add_new_fragment = [&](const Fragment& fragment) {
dst_fragments_order.push_back(fragment);
src_to_dst[&*src_dim].push_back(dst_fragments_order.size() - 1);
};
if (dst_remaining_size >= src_dim->full_count()) {
if (dst_remaining_size % src_dim->full_count()) {
return FusionDecision::Forbid("Unsupported bitcast");
}
add_new_fragment(*src_dim);
dst_remaining_size /= src_dim->full_count();
} else {
int64_t src_remaining_size = src_dim->full_count();
if (dst_remaining_size > 1) {
if (src_remaining_size % dst_remaining_size || (src_dim->is_sliced())) {
return FusionDecision::Forbid("Unsupported bitcast");
}
add_new_fragment(
Fragment{src_dim->dst_dim_number(), dst_remaining_size});
src_remaining_size /= dst_remaining_size;
dst_remaining_size = 1;
}
while (src_remaining_size > 1) {
CHECK(dst_dim_it != dst_dim_end);
int64_t dst_dim_size = dst_shape.dimensions(*dst_dim_it);
int64_t new_fragment_size = dst_dim_size;
if (dst_dim_size > src_remaining_size) {
if (dst_dim_size % src_remaining_size) {
return FusionDecision::Forbid("Unsupported bitcast");
}
dst_remaining_size = dst_dim_size / src_remaining_size;
new_fragment_size = src_remaining_size;
}
if (src_dim->is_sliced()) {
return FusionDecision::Forbid("Unsupported bitcast");
}
add_new_fragment(
Fragment{src_dim->dst_dim_number(), new_fragment_size});
src_remaining_size /= new_fragment_size;
++dst_dim_it;
}
}
}
CHECK_EQ(dst_remaining_size, 1);
while (dst_dim_it != dst_dim_end) {
if (dst_shape.dimensions(*dst_dim_it) != 1) {
return FusionDecision::Forbid("Unsupported bitcast");
}
if (!dst_fragments_order.empty()) {
dst_fragments_order.push_back(
Fragment{dst_fragments_order.back().dst_dim_number(), 1});
src_to_dst[&src_fragments_order.back()].push_back(
dst_fragments_order.size() - 1);
}
++dst_dim_it;
}
FragmentOrders& dst_dim_fragment_orders = dst_dim_order.DimFragmentsOrders();
for (const auto& [dim_index, dim_sequence] :
src_dim_order.DimFragmentsOrders()) {
std::vector<int>& dst = dst_dim_fragment_orders[dim_index];
dst.reserve(dim_sequence.size());
for (const int src : dim_sequence) {
std::copy(src_to_dst[&src_fragments_order[src]].cbegin(),
src_to_dst[&src_fragments_order[src]].cend(),
std::back_inserter(dst));
}
}
return dst_dim_orders;
}
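// Propagates a dimension order through ops that rearrange or resize dimensions
// (transpose, broadcast, reduce, concatenate, copy, pad, slice, dynamic-slice)
// by remapping the source fragments onto each destination's logical
// dimensions.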
DimOrderMapOrError GetPropagatedDimOrdersForDimAlteringOp(
const HloInstruction& hlo, const TransformDirection direction,
const DimensionOrder& src_dim_order, const DotProperties& properties) {
std::list<Fragment> new_fragments;
const HloInstruction& src = GetSourceHlo(hlo, direction);
Fragments src_fragments_order = src_dim_order.TensorFragmentsOrder();
if (hlo.opcode() == HloOpcode::kSlice &&
ShapeUtil::IsEffectiveScalar(hlo.shape())) {
return FusionDecision::Forbid("Slice to scalar is not implemented yet.");
}
std::vector<std::vector<Fragment*>> src_physical;
src_physical.reserve(src.shape().rank());
if (src_fragments_order.size() < src.shape().rank()) {
return FusionDecision::Forbid(
"Cannot propagate further from trivial sized tensor");
}
auto src_fragment_it = src_fragments_order.begin();
for (int64_t dim_index : src.shape().layout().minor_to_major()) {
const int64_t dim_size = src.shape().dimensions(dim_index);
int64_t subdim_size_accumulator = 1;
std::vector<Fragment*> subdim_group;
do {
CHECK(src_fragment_it != src_fragments_order.end());
subdim_size_accumulator *= src_fragment_it->full_count();
subdim_group.push_back(&*src_fragment_it);
++src_fragment_it;
} while (subdim_size_accumulator < dim_size);
CHECK_EQ(subdim_size_accumulator, dim_size);
src_physical.push_back(subdim_group);
}
std::vector<std::vector<Fragment*>> src_logical;
src_logical.resize(src_physical.size());
for (int i = 0; i < src_physical.size(); ++i) {
src_logical[src.shape().layout().minor_to_major(i)] = src_physical[i];
}
DimOrderMap dst_dim_orders;
int64_t concat_accumulated_size = 0;
for (const HloInstruction* dst : GetDestHlos(hlo, direction)) {
DimensionOrder& dst_dim_order =
dst_dim_orders.insert({dst, DimensionOrder()}).first->second;
std::vector<std::vector<Fragment*>> dst_logical;
if (hlo.opcode() == HloOpcode::kTranspose) {
const auto* transpose = Cast<HloTransposeInstruction>(&hlo);
std::vector<int64_t> permutation(transpose->dimensions().cbegin(),
transpose->dimensions().cend());
if (direction == TransformDirection::kInputToOutput) {
permutation = InversePermutation(permutation);
}
dst_logical.resize(permutation.size());
for (int i = 0; i < permutation.size(); ++i) {
dst_logical[permutation[i]] = src_logical[i];
}
} else if (hlo.opcode() == HloOpcode::kBroadcast) {
const auto* broadcast = Cast<HloBroadcastInstruction>(&hlo);
dst_logical.resize(broadcast->dimensions().size());
for (int i = 0; i < broadcast->dimensions().size(); ++i) {
dst_logical[i] = src_logical[broadcast->dimensions()[i]];
}
} else if (hlo.opcode() == HloOpcode::kReduce) {
if (dst != &hlo && hlo.operand_index(dst) == 1) {
continue;
}
const auto* reduce = Cast<HloReduceInstruction>(&hlo);
dst_logical.resize(src_logical.size() + reduce->dimensions().size());
if (reduce->dimensions().size() != 1) {
return FusionDecision::Forbid("Unsupported reduction.");
} else if (reduce->dimensions().front() !=
reduce->operand(0)->shape().rank() - 1) {
return FusionDecision::Forbid("Only row reductions are supported.");
}
} else if (hlo.opcode() == HloOpcode::kConcatenate) {
dst_logical.resize(src_logical.size());
for (int i = 0; i < src_logical.size(); ++i) {
if (i == hlo.concatenate_dimension()) {
if (src_logical[i].size() != 1 || src_logical[i][0]->is_sliced()) {
return FusionDecision::Forbid("Unsupported concatenation.");
}
const Fragment& src_fragment = *src_logical[i][0];
Fragment& dst_fragment = new_fragments.emplace_back(
src_fragment.dst_dim_number(), dst->shape().dimensions(i));
dst_fragment.set_slice(-concat_accumulated_size,
dst->shape().dimensions(i));
concat_accumulated_size += dst->shape().dimensions(i);
dst_logical[i].push_back(&dst_fragment);
} else {
dst_logical[i] = src_logical[i];
}
}
} else if (hlo.opcode() == HloOpcode::kCopy) {
CHECK(ShapeUtil::SameDimensions(src.shape(), dst->shape()));
dst_logical = src_logical;
} else if (hlo.opcode() == HloOpcode::kPad) {
if (dst != &hlo && hlo.operand_index(dst) == 1) {
continue;
}
const auto* pad = Cast<HloPadInstruction>(&hlo);
dst_logical.resize(src_logical.size());
for (int i = 0; i < src_logical.size(); ++i) {
const int padding =
pad->padding_config().dimensions(i).edge_padding_high();
CHECK_EQ(pad->padding_config().dimensions(i).edge_padding_low(), 0);
CHECK_EQ(pad->padding_config().dimensions(i).interior_padding(), 0);
if (padding == 0) {
dst_logical[i] = src_logical[i];
} else {
const std::vector<Fragment*>& fragments = src_logical[i];
CHECK_GE(fragments.size(), 2);
CHECK(absl::c_all_of(fragments, [&](const Fragment* fragment) {
return fragment->dst_dim_number() ==
fragments.front()->dst_dim_number();
}));
std::vector<Fragment*> non_trivial_fragments;
absl::c_copy_if(fragments, std::back_inserter(non_trivial_fragments),
[](const Fragment* fragment) {
return fragment->full_count() > 1;
});
CHECK_EQ(non_trivial_fragments.size(), 2);
new_fragments.emplace_back(
non_trivial_fragments[0]->dst_dim_number(),
non_trivial_fragments[0]->full_count() *
non_trivial_fragments[1]->full_count() -
padding);
dst_logical[i] = {&new_fragments.back()};
}
}
} else if (hlo.opcode() == HloOpcode::kSlice) {
const auto slice = Cast<HloSliceInstruction>(&hlo);
dst_logical.resize(src_logical.size());
for (int dim = 0; dim < src_logical.size(); ++dim) {
dst_logical[dim] = src_logical[dim];
if (slice->slice_limits(dim) - slice->slice_starts(dim) !=
dst->shape().dimensions(dim)) {
if (dst_logical[dim].size() > 1) {
return FusionDecision::Forbid("Slicing of fragmented dimension.");
}
auto fragment = dst_logical[dim].front();
fragment->set_count(dst->shape().dimensions(dim));
fragment->set_slice(
fragment->slice_start() + slice->slice_starts(dim),
fragment->sliced_count());
}
}
} else if (hlo.opcode() == HloOpcode::kDynamicSlice) {
if (dst != &hlo && hlo.operand_index(dst) >= 1) {
continue;
}
const auto dynamic_slice = Cast<HloDynamicSliceInstruction>(&hlo);
dst_logical.resize(src_logical.size());
for (int dim = 0; dim < src_logical.size(); ++dim) {
dst_logical[dim] = src_logical[dim];
if (dynamic_slice->slice_sizes(dim) != dst->shape().dimensions(dim)) {
if (dst_logical[dim].size() > 1) {
return FusionDecision::Forbid("Slicing of fragmented dimension.");
}
auto fragment = dst_logical[dim].front();
fragment->set_count(dst->shape().dimensions(dim));
fragment->set_slice(fragment->slice_start(),
dst->shape().dimensions(dim));
}
}
} else {
return FusionDecision::Forbid("Function called on a wrong instruction.");
}
absl::flat_hash_map<const Fragment*, int> src_to_dst;
Fragments& dst_fragments_order = dst_dim_order.TensorFragmentsOrder();
FragmentOrders& dst_dim_fragments_order =
dst_dim_order.DimFragmentsOrders();
absl::flat_hash_set<int> dim_numbers_present_in_dst;
for (const int64_t dim_idx : dst->shape().layout().minor_to_major()) {
for (const Fragment* subdim : dst_logical[dim_idx]) {
dst_fragments_order.push_back(*subdim);
src_to_dst[subdim] = dst_fragments_order.size() - 1;
dim_numbers_present_in_dst.insert(subdim->dst_dim_number());
}
}
for (const auto& [dim_index, dim_sequence] :
src_dim_order.DimFragmentsOrders()) {
for (const int fragment_number : dim_sequence) {
const auto it = src_to_dst.find(&src_fragments_order[fragment_number]);
if (it == src_to_dst.cend()) {
if (hlo.opcode() == HloOpcode::kBroadcast &&
src_fragments_order[fragment_number].full_count() > 1 &&
dim_numbers_present_in_dst.contains(dim_index)) {
return FusionDecision::Forbid("Unsupported broadcast");
}
continue;
}
dst_dim_fragments_order[dim_index].push_back(it->second);
}
}
}
return dst_dim_orders;
}
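// Dispatches to the opcode-specific propagation routines above and returns the
// dimension orders of the instruction's neighbors in `direction`, or a
// FusionDecision explaining why the op cannot be handled.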
DimOrderMapOrError GetPropagatedDimOrders(const HloInstruction& hlo,
const TransformDirection direction,
const DimensionOrder& src_dim_order,
const DotProperties& properties) {
VLOG(7) << "Analyzing " << hlo.ToString();
if (hlo.opcode() != HloOpcode::kParameter &&
direction == TransformDirection::kOutputToInput &&
absl::c_any_of(hlo.users(), [](const HloInstruction* user) {
return (user->opcode() == HloOpcode::kConcatenate ||
user->opcode() == HloOpcode::kDynamicSlice);
})) {
return FusionDecision::Forbid(
"No fusion into concatenations or dynamic slice.");
}
if (hlo.opcode() == HloOpcode::kParameter ||
hlo_query::IsScalarConstant(&hlo)) {
CHECK(direction == TransformDirection::kOutputToInput);
return DimOrderMap{};
} else if (hlo.opcode() == HloOpcode::kTranspose ||
hlo.opcode() == HloOpcode::kCopy) {
return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,
properties);
} else if (hlo.opcode() == HloOpcode::kBroadcast) {
if (direction != TransformDirection::kOutputToInput) {
return FusionDecision::Forbid("Unsupported broadcast direction.");
}
return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,
properties);
} else if (hlo.opcode() == HloOpcode::kPad) {
if (direction != TransformDirection::kOutputToInput) {
return FusionDecision::Forbid("Unsupported pad direction.");
}
return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,
properties);
} else if (hlo.operand_count() > 0 &&
legacy_triton::IsTritonSupportedElementwiseUpToFloatNormalization(
hlo.opcode(), hlo.operand(0)->shape().element_type())) {
return GetPropagatedDimOrdersForElementwise(hlo, direction, src_dim_order);
} else if (hlo.opcode() == HloOpcode::kBitcast) {
return GetPropagatedDimOrdersForBitcast(hlo, direction, src_dim_order,
properties);
} else if (hlo.opcode() == HloOpcode::kSlice) {
if (direction != TransformDirection::kOutputToInput) {
return FusionDecision::Forbid("Unsupported slice direction.");
}
return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,
properties);
} else if (hlo.opcode() == HloOpcode::kDynamicSlice &&
direction == TransformDirection::kOutputToInput) {
if (CodegenDecision decision = legacy_triton::IsTritonSupportedDynamicSlice(
*Cast<HloDynamicSliceInstruction>(&hlo));
!decision.CanFuse()) {
return decision;
}
return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,
properties);
} else if (hlo.opcode() == HloOpcode::kReshape) {
if (!ShapeUtil::ReshapeIsBitcast(hlo.operand(0)->shape(), hlo.shape())) {
return FusionDecision::Forbid("Non-bitcast reshape.");
}
return GetPropagatedDimOrdersForBitcast(hlo, direction, src_dim_order,
properties);
} else if (hlo.opcode() == HloOpcode::kConcatenate &&
direction == TransformDirection::kOutputToInput) {
int64_t noncontracting_dim_label = properties.noncontracting_dimension;
const FragmentOrders& src_dim_fragments_orders =
src_dim_order.DimFragmentsOrders();
auto noncontracting_dim_fragment_order_it =
src_dim_fragments_orders.find(noncontracting_dim_label);
if (noncontracting_dim_fragment_order_it !=
src_dim_fragments_orders.end()) {
if (noncontracting_dim_fragment_order_it->second.size() > 1) {
return FusionDecision::Forbid(
"Concatenations on split non-contracting dimensions are "
"unsupported.");
}
}
auto dim = LogicalIndexOfLabeledDimension(hlo.shape(), src_dim_order,
noncontracting_dim_label);
if (!dim.has_value() || dim.value() != hlo.concatenate_dimension()) {
return FusionDecision::Forbid("Unsupported concatenation.");
}
if (absl::c_any_of(hlo.operands(), [&hlo](const HloInstruction* operand) {
constexpr int kMinConcatFragmentSize = 64;
return operand->shape().dimensions(hlo.concatenate_dimension()) %
kMinConcatFragmentSize !=
0;
})) {
return FusionDecision::Forbid(
"At least one operand of concatenation can not be perfectly tiled.");
}
return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,
properties);
}
return FusionDecision::Forbid("Unimplemented instruction.");
}
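// Number of bytes by which the instruction's operands exceed its output.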
int64_t InputMinusOutputBytes(const HloInstruction& hlo) {
CHECK(!hlo.shape().IsTuple());
int64_t input_size = 0;
for (const HloInstruction* operand : hlo.operands()) {
CHECK(!operand->shape().IsTuple());
input_size += ShapeUtil::ByteSizeOf(operand->shape());
}
return input_size - ShapeUtil::ByteSizeOf(hlo.shape());
}
bool CanNotBeFusedIntoAUser(const HloInstruction& hlo) {
return hlo.IsRoot() || (hlo.user_count() == 1 && hlo.users()[0]->IsRoot() &&
hlo.users()[0]->opcode() == HloOpcode::kTuple);
}
constexpr int kIoToleranceBytes = 1024;
bool IsInputWorthFusing(const HloInstruction& hlo) {
if (InputMinusOutputBytes(hlo) <= kIoToleranceBytes) {
return true;
}
if (hlo.user_count() > 1) {
return false;
}
if (hlo.opcode() == HloOpcode::kSlice &&
hlo_query::AllOperandsAreParametersOrConstants(hlo)) {
return true;
}
return hlo_query::AllOperandsAreParametersOrConstantsWithSingleUser(hlo);
}
bool IsOutputWorthFusing(const HloInstruction& hlo) {
return CanNotBeFusedIntoAUser(hlo) ||
InputMinusOutputBytes(hlo) >= -kIoToleranceBytes;
}
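// A conversion that narrows its input is not worth fusing: the fusion would
// then read the wider operand instead of the narrower result.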
FusionDecision IsConversionWorthFusing(const HloInstruction& input,
se::GpuComputeCapability gpu_version) {
if (ShapeUtil::ByteSizeOf(input.operand(0)->shape()) >
ShapeUtil::ByteSizeOf(input.shape())) {
return FusionDecision::Forbid("Narrowing conversion.");
}
return FusionDecision::Allow();
}
}
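// Propagates dimension orders through `hlo` and validates that the resulting
// orders are supported; returns the orders together with the combined dot
// requirements, or the first blocking FusionDecision.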
DimOrdersAndReqsOrError GetPropagatedDimOrdersAndRequirements(
const HloInstruction& hlo, const DimensionOrder& src_dim_order,
TransformDirection direction, const DotProperties& properties) {
DimOrderMapOrError propagated_dim_orders_or_error =
GetPropagatedDimOrders(hlo, direction, src_dim_order, properties);
if (std::holds_alternative<FusionDecision>(propagated_dim_orders_or_error)) {
return std::get<FusionDecision>(propagated_dim_orders_or_error);
}
DimOrderMap propagated_dim_orders =
std::move(std::get<DimOrderMap>(propagated_dim_orders_or_error));
DotRequirementsOrError requirements_or_error =
GetRequirementsIfSupportedOrders(hlo, propagated_dim_orders, properties);
if (std::holds_alternative<FusionDecision>(requirements_or_error)) {
return std::get<FusionDecision>(requirements_or_error);
}
return DimOrdersAndReqs{propagated_dim_orders,
std::get<DotRequirements>(requirements_or_error)};
}
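// Like GetPropagatedDimOrdersAndRequirements, but additionally applies
// profitability heuristics that depend on the opcode, the fusion level and the
// transform direction.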
DimOrdersAndReqsOrError
GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible(
const HloInstruction& hlo, TransformDirection transform_direction,
const std::optional<int>& src_operand_index,
const DimensionOrder& src_dim_order,
const se::GpuComputeCapability& gpu_version,
const DotProperties& properties) {
CHECK_EQ(transform_direction == TransformDirection::kInputToOutput,
src_operand_index.has_value());
if (hlo.opcode() == HloOpcode::kTuple ||
hlo.opcode() == HloOpcode::kGetTupleElement) {
return FusionDecision::Forbid("Unsupported instruction.");
}
if (hlo.opcode() == HloOpcode::kReduce ||
hlo.opcode() == HloOpcode::kAllReduce ||
hlo.opcode() == HloOpcode::kAllReduceStart ||
hlo.opcode() == HloOpcode::kAllReduceDone) {
return FusionDecision::Forbid("Reductions are not fused yet.");
}
if (hlo.opcode() == HloOpcode::kPad) {
return FusionDecision::Forbid("Pads are not fused yet.");
}
if (auto decision =
legacy_triton::IsTritonSupportedInstruction(hlo, gpu_version);
!decision.CanFuse()) {
return decision;
}
DimOrdersAndReqsOrError result_or_error =
GetPropagatedDimOrdersAndRequirements(hlo, src_dim_order,
transform_direction, properties);
if (std::holds_alternative<FusionDecision>(result_or_error)) {
VLOG(5) << "Not fusing " << hlo.ToString()
<< " to the output due to the decision: "
<< std::get<FusionDecision>(result_or_error).Explain();
return result_or_error;
}
DimOrdersAndReqs dim_orders_and_requirements =
std::move(std::get<DimOrdersAndReqs>(result_or_error));
int fusion_level =
hlo.GetModule()->config().debug_options().xla_gpu_triton_fusion_level();
if (transform_direction == TransformDirection::kOutputToInput) {
if (fusion_level < 2) {
if (hlo.opcode() == HloOpcode::kConvert) {
if (FusionDecision decision = IsConversionWorthFusing(hlo, gpu_version);
!decision) {
return decision;
}
} else if (hlo.IsElementwise() && hlo.opcode() != HloOpcode::kCopy) {
return FusionDecision::Forbid("Ignored elementwise operation");
}
} else {
bool accepted = false;
if (hlo.IsElementwise() && hlo.operand_count() == 2) {
for (const HloInstruction* operand : hlo.operands()) {
if (operand->opcode() == HloOpcode::kBroadcast &&
(operand->operand(0)->opcode() == HloOpcode::kParameter ||
operand->operand(0)->opcode() == HloOpcode::kConstant) &&
std::holds_alternative<DimOrdersAndReqs>(
GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible(
*operand, TransformDirection::kOutputToInput,
std::nullopt,
dim_orders_and_requirements.dim_orders.at(operand),
gpu_version, properties))) {
accepted = true;
break;
}
}
}
if (!accepted && !IsInputWorthFusing(hlo)) {
return FusionDecision::Forbid(
"Not obviously profitable to fuse as input.");
}
}
} else {
if (fusion_level < 2) {
return FusionDecision::Forbid(
"Skipping fusing outputs at low fusion levels.");
}
for (int i = 0; i < hlo.operand_count(); ++i) {
const HloInstruction* operand = hlo.operand(i);
if (i == *src_operand_index) {
continue;
}
if ((operand->opcode() == HloOpcode::kBroadcast &&
ShapeUtil::IsScalar(operand->operand(0)->shape())) ||
operand->opcode() == HloOpcode::kParameter) {
continue;
}
return FusionDecision::Forbid(
"Has multiple inputs - not properly analyzed yet.");
}
if (!IsOutputWorthFusing(hlo)) {
return FusionDecision::Forbid(
"Not obviously profitable to fuse as output.");
}
}
return dim_orders_and_requirements;
}
}
}
} | #include "xla/service/gpu/triton_tiling_propagation.h"
#include <vector>
#include <gtest/gtest.h>
#include "xla/tests/hlo_test_base.h"
namespace xla::gpu {
namespace {
using TritonTilingPropagationTest = HloTestBase;
using triton_fusion::DimensionOrder;
DimensionOrder FromFragments(DimensionOrder::Fragments fragments) {
DimensionOrder dim_order;
DimensionOrder::Fragments& tensor_fragments_order =
dim_order.TensorFragmentsOrder();
DimensionOrder::FragmentOrders& dim_fragments_orders =
dim_order.DimFragmentsOrders();
for (const DimensionOrder::Fragment& fragment : fragments) {
tensor_fragments_order.push_back(fragment);
dim_fragments_orders[fragment.dst_dim_number()].push_back(
tensor_fragments_order.size());
}
return dim_order;
}
TEST_F(
TritonTilingPropagationTest,
DimensionOrdersRemainPhysicallyEquivalentAfterInsertingTrivialDimensions) {
DimensionOrder::Fragment fragment_1(0, 97);
DimensionOrder::Fragment fragment_2(0, 1);
DimensionOrder dimension_order_1 = FromFragments({fragment_1, fragment_2});
DimensionOrder::Fragment fragment_3(0, 97);
DimensionOrder::Fragment fragment_4(1, 1);
DimensionOrder dimension_order_2 = FromFragments({fragment_3, fragment_4});
EXPECT_TRUE(dimension_order_1.IsPhysicallyEquivalent(dimension_order_2));
}
TEST_F(
TritonTilingPropagationTest,
IterationSpecsRemainPhysicallyEquivalentAfterInsertingTrivialDimensions) {
TensorIterationSpec::IterationSpecFragment fragment_1 = {
1, 97, 0, 97,
{97}};
TensorIterationSpec spec_1;
spec_1[0].push_back(fragment_1);
TensorIterationSpec::IterationSpecFragment fragment_2 = {
1, 97, 0, 97,
{97}};
TensorIterationSpec::IterationSpecFragment fragment_3 = {
97, 1, 0, 1,
{1}};
TensorIterationSpec spec_2;
spec_2[0].push_back(fragment_2);
spec_2[1].push_back(fragment_3);
EXPECT_TRUE(spec_1.IsPhysicallyEquivalent(spec_2));
}
TEST_F(TritonTilingPropagationTest,
DimensionsShouldNotBeRemovedByToTensorIterationSpec) {
DimensionOrder::Fragment fragment_0(0, 97);
DimensionOrder::Fragment fragment_1(1, 1);
DimensionOrder dimension_order = FromFragments({fragment_0, fragment_1});
TensorIterationSpec spec = dimension_order.ToTensorIterationSpec();
const TensorIterationSpec::DimIterationSpec* dim_spec_0 = spec.Find(0);
EXPECT_NE(dim_spec_0, nullptr);
EXPECT_EQ(dim_spec_0->size(), 1);
EXPECT_EQ(dim_spec_0->at(0).count, 97);
const TensorIterationSpec::DimIterationSpec* dim_spec_1 = spec.Find(1);
EXPECT_NE(dim_spec_1, nullptr);
EXPECT_EQ(dim_spec_1->size(), 1);
EXPECT_EQ(dim_spec_1->at(0).count, 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/triton_tiling_propagation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/triton_tiling_propagation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
94fd6b98-724b-4a98-b0f9-05dcbf12c062 | cpp | tensorflow/tensorflow | async_wrapper | third_party/xla/xla/service/gpu/transforms/async_wrapper.cc | third_party/xla/xla/service/gpu/transforms/async_wrapper_test.cc | #include "xla/service/gpu/transforms/async_wrapper.h"
#include <algorithm>
#include <deque>
#include <iterator>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla::gpu {
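// Walks the entry computation (descending into computations reached via kCall)
// and rewrites every instruction matching `predicate_` into an
// async-start/async-done pair.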
absl::StatusOr<bool> AsyncWrapper::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
XLA_VLOG_LINES(
1, absl::StrCat("AsyncWrapper will process the following module:\n",
module->ToString()));
std::deque<HloComputation*> computations;
computations.push_back(module->entry_computation());
while (!computations.empty()) {
HloComputation* computation = computations.front();
computations.pop_front();
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (predicate_(instruction)) {
XLA_VLOG_LINES(
1, absl::StrCat(
"AsyncWrapper will make the following instruction async:\n",
instruction->ToString()));
TF_RETURN_IF_ERROR(
computation
->CreateAsyncInstructions(instruction,
{ShapeUtil::MakeScalarShape(U32)})
.status());
changed = true;
continue;
}
if (instruction->opcode() == HloOpcode::kCall) {
std::copy(instruction->called_computations().begin(),
instruction->called_computations().end(),
std::back_inserter(computations));
}
}
}
XLA_VLOG_LINES(
1,
absl::StrCat("AsyncWrapper finished processing the following module:\n",
module->ToString()));
return changed;
}
} | #include "xla/service/gpu/transforms/async_wrapper.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_interface.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/status_matchers.h"
namespace xla::gpu {
namespace {
using ::tsl::testing::IsOkAndHolds;
class AsyncWrapperTest : public HloTestBase {};
int CountAsyncInstructions(HloComputation* computation) {
int count = 0;
for (const HloInstruction* instruction : computation->instructions()) {
if (instruction->IsAsynchronous()) ++count;
}
return count;
}
TEST_F(AsyncWrapperTest, BasicFusion) {
const char* hlo_text = R"(
HloModule m
double1 {
p0 = f32[1] parameter(0)
ROOT add = f32[1] add(p0, p0)
}
double2 {
p0 = f32[1] parameter(0)
ROOT add = f32[1] add(p0, p0)
}
ENTRY main {
p0 = f32[1] parameter(0)
agg1 = f32[1] fusion(p0), kind=kLoop, calls=double1
agg2 = f32[1] fusion(p0), kind=kLoop, calls=double2
ROOT done = f32[1] add(agg1, agg2)
})";
std::unique_ptr<VerifiedHloModule> module =
ParseAndReturnVerifiedModule(hlo_text).value();
AsyncWrapper wrapper([](const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kFusion;
});
EXPECT_THAT(wrapper.HloModulePass::Run(module.get()), IsOkAndHolds(true));
EXPECT_EQ(CountAsyncInstructions(module->entry_computation()), 4);
Literal argument = LiteralUtil::CreateR1<float>({1.0});
Literal expected = LiteralUtil::CreateR1<float>({4.0});
Literal result = ExecuteNoHloPasses(std::move(module), {&argument});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/async_wrapper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/async_wrapper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
72eef4bf-eeb4-4ef8-b020-011e9d46a778 | cpp | tensorflow/tensorflow | broadcast_to_op | tensorflow/compiler/tf2xla/kernels/broadcast_to_op.cc | tensorflow/core/kernels/broadcast_to_op_test.cc | #include <vector>
#include "tensorflow/compiler/tf2xla/lib/broadcast.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/xla_builder.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
class BroadcastToOp : public XlaOpKernel {
public:
explicit BroadcastToOp(OpKernelConstruction* context)
: XlaOpKernel(context) {}
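  // Broadcasts the input to the requested shape, resolving the shape operand
  // as an upper bound and tagging dynamic output dimensions with their runtime
  // sizes via SetDimensionSize.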
void Compile(XlaOpKernelContext* context) override {
TensorShape output_shape;
OP_REQUIRES_OK(context,
context->ConstantInputAsShape(
1, &output_shape, xla::ValueInferenceMode::kUpperBound));
auto output_status_or =
BroadcastTo(context->Input(0), output_shape.dim_sizes());
OP_REQUIRES_OK(context, output_status_or.status());
auto output = output_status_or.value();
std::vector<bool> dynamic_dims;
OP_REQUIRES_OK(
context, context->ResolveInputDynamismIntoPredVector(1, &dynamic_dims));
for (int64_t dim = 0; dim < dynamic_dims.size(); ++dim) {
if (dynamic_dims[dim]) {
output = xla::SetDimensionSize(
output,
xla::Reshape(xla::Slice(context->Input(1), {dim}, {dim + 1}, {1}),
{}),
dim);
}
}
context->SetOutput(0, output);
}
};
REGISTER_XLA_OP(Name("BroadcastTo").CompileTimeConstantInput("shape"),
BroadcastToOp);
}
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
template <typename InputShape>
static Graph* BroadcastTo(int dim0, int dim1, InputShape input_shape) {
Graph* g = new Graph(OpRegistry::Global());
Tensor input(DT_FLOAT, input_shape(dim0, dim1));
input.flat<float>() = input.flat<float>().setRandom();
Tensor shape(DT_INT32, TensorShape({2}));
shape.flat<int32>()(0) = dim0;
shape.flat<int32>()(1) = dim1;
Node* node;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "BroadcastTo")
.Input(test::graph::Constant(g, input))
.Input(test::graph::Constant(g, shape))
.Attr("T", DT_FLOAT)
.Attr("Tidx", DT_INT32)
.Finalize(g, &node));
return g;
}
#define BM_BroadcastTo_InnerDim(DIM0, DIM1, type) \
static void BM_BroadcastTo_Inner##_##type##_##DIM0##_##DIM1( \
::testing::benchmark::State& state) { \
test::Benchmark(#type, \
BroadcastTo(DIM0, DIM1, \
[](int dim0, int dim1) { \
return TensorShape({dim0, 1}); \
}), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * DIM0 * \
DIM1); \
} \
BENCHMARK(BM_BroadcastTo_Inner##_##type##_##DIM0##_##DIM1)->UseRealTime();
#define BM_BroadcastTo_OuterDim(DIM0, DIM1, type) \
static void BM_BroadcastTo_Outer##_##type##_##DIM0##_##DIM1( \
::testing::benchmark::State& state) { \
test::Benchmark(#type, \
BroadcastTo(DIM0, DIM1, \
[](int dim0, int dim1) { \
return TensorShape({1, dim1}); \
}), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * DIM0 * \
DIM1); \
} \
BENCHMARK(BM_BroadcastTo_Outer##_##type##_##DIM0##_##DIM1)->UseRealTime();
BM_BroadcastTo_InnerDim(64, 64, cpu);
BM_BroadcastTo_InnerDim(128, 128, cpu);
BM_BroadcastTo_InnerDim(256, 256, cpu);
BM_BroadcastTo_InnerDim(512, 512, cpu);
BM_BroadcastTo_InnerDim(1024, 1024, cpu);
BM_BroadcastTo_InnerDim(500, 20000, cpu);
BM_BroadcastTo_OuterDim(64, 64, cpu);
BM_BroadcastTo_OuterDim(128, 128, cpu);
BM_BroadcastTo_OuterDim(256, 256, cpu);
BM_BroadcastTo_OuterDim(512, 512, cpu);
BM_BroadcastTo_OuterDim(1024, 1024, cpu);
BM_BroadcastTo_OuterDim(500, 20000, cpu);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/broadcast_to_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/broadcast_to_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3c74241c-37bf-4b8c-8147-6df223966067 | cpp | google/tensorstore | stringify | tensorstore/internal/preprocessor/stringify.h | tensorstore/internal/preprocessor/stringify_test.cc | #ifndef TENSORSTORE_INTERNAL_PREPROCESSOR_STRINGIFY_H_
#define TENSORSTORE_INTERNAL_PREPROCESSOR_STRINGIFY_H_
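// Converts its arguments to a string literal; the extra indirection through
// TENSORSTORE_PP_STRINGIFY_IMPL ensures macro arguments are expanded before
// stringification.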
#define TENSORSTORE_PP_STRINGIFY(...) TENSORSTORE_PP_STRINGIFY_IMPL(__VA_ARGS__)
#define TENSORSTORE_PP_STRINGIFY_IMPL(...) #__VA_ARGS__
#endif | #include "tensorstore/internal/preprocessor/stringify.h"
#include <string_view>
namespace {
inline constexpr bool Equal(std::string_view a, std::string_view b) {
return a == b;
}
#define X abc
#define Y abc, def
static_assert(Equal(TENSORSTORE_PP_STRINGIFY(X), "abc"));
static_assert(Equal(TENSORSTORE_PP_STRINGIFY(Y), "abc, def"));
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/preprocessor/stringify.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/preprocessor/stringify_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
7221b1d8-3e12-4d64-a79a-9293f0400d31 | cpp | google/quiche | test_utils | quiche/http2/adapter/test_utils.cc | quiche/http2/adapter/test_utils_test.cc | #include "quiche/http2/adapter/test_utils.h"
#include <cstring>
#include <optional>
#include <ostream>
#include <vector>
#include "absl/strings/str_format.h"
#include "quiche/http2/adapter/http2_visitor_interface.h"
#include "quiche/http2/core/spdy_protocol.h"
#include "quiche/http2/hpack/hpack_encoder.h"
#include "quiche/common/quiche_data_reader.h"
namespace http2 {
namespace adapter {
namespace test {
namespace {
using ConnectionError = Http2VisitorInterface::ConnectionError;
std::string EncodeHeaders(const quiche::HttpHeaderBlock& entries) {
spdy::HpackEncoder encoder;
encoder.DisableCompression();
return encoder.EncodeHeaderBlock(entries);
}
}
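// Reports how much of the queued payload for `stream_id` may be sent (up to
// `max_length`) and whether that send exhausts the data and/or ends the
// stream; streams with no queued payload are reported as blocked.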
TestVisitor::DataFrameHeaderInfo TestVisitor::OnReadyToSendDataForStream(
Http2StreamId stream_id, size_t max_length) {
auto it = data_map_.find(stream_id);
if (it == data_map_.end()) {
QUICHE_DVLOG(1) << "Source not in map; returning blocked.";
return {0, false, false};
}
DataPayload& payload = it->second;
if (payload.return_error) {
QUICHE_DVLOG(1) << "Simulating error response for stream " << stream_id;
return {DataFrameSource::kError, false, false};
}
const absl::string_view prefix = payload.data.GetPrefix();
const size_t frame_length = std::min(max_length, prefix.size());
const bool is_final_fragment = payload.data.Read().size() <= 1;
const bool end_data =
payload.end_data && is_final_fragment && frame_length == prefix.size();
const bool end_stream = payload.end_stream && end_data;
return {static_cast<int64_t>(frame_length), end_data, end_stream};
}
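// Writes the frame header followed by `payload_bytes` taken from the stream's
// queued payload; returns false if either write comes up short or the
// requested payload is unavailable.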
bool TestVisitor::SendDataFrame(Http2StreamId stream_id,
absl::string_view frame_header,
size_t payload_bytes) {
const int64_t frame_result = OnReadyToSend(frame_header);
if (frame_result < 0 ||
static_cast<size_t>(frame_result) != frame_header.size()) {
return false;
}
auto it = data_map_.find(stream_id);
if (it == data_map_.end()) {
if (payload_bytes > 0) {
return false;
} else {
return true;
}
}
DataPayload& payload = it->second;
absl::string_view frame_payload = payload.data.GetPrefix();
if (frame_payload.size() < payload_bytes) {
return false;
}
frame_payload = frame_payload.substr(0, payload_bytes);
const int64_t payload_result = OnReadyToSend(frame_payload);
if (payload_result < 0 ||
static_cast<size_t>(payload_result) != frame_payload.size()) {
return false;
}
payload.data.RemovePrefix(payload_bytes);
return true;
}
void TestVisitor::AppendPayloadForStream(Http2StreamId stream_id,
absl::string_view payload) {
auto char_data = std::unique_ptr<char[]>(new char[payload.size()]);
std::copy(payload.begin(), payload.end(), char_data.get());
data_map_[stream_id].data.Append(std::move(char_data), payload.size());
}
void TestVisitor::SetEndData(Http2StreamId stream_id, bool end_stream) {
DataPayload& payload = data_map_[stream_id];
payload.end_data = true;
payload.end_stream = end_stream;
}
void TestVisitor::SimulateError(Http2StreamId stream_id) {
DataPayload& payload = data_map_[stream_id];
payload.return_error = true;
}
std::pair<int64_t, bool> TestVisitor::PackMetadataForStream(
Http2StreamId stream_id, uint8_t* dest, size_t dest_len) {
auto it = outbound_metadata_map_.find(stream_id);
if (it == outbound_metadata_map_.end()) {
return {-1, false};
}
const size_t to_copy = std::min(it->second.size(), dest_len);
auto* src = reinterpret_cast<uint8_t*>(it->second.data());
std::copy(src, src + to_copy, dest);
it->second = it->second.substr(to_copy);
if (it->second.empty()) {
outbound_metadata_map_.erase(it);
return {to_copy, true};
}
return {to_copy, false};
}
void TestVisitor::AppendMetadataForStream(
Http2StreamId stream_id, const quiche::HttpHeaderBlock& payload) {
outbound_metadata_map_.insert({stream_id, EncodeHeaders(payload)});
}
VisitorDataSource::VisitorDataSource(Http2VisitorInterface& visitor,
Http2StreamId stream_id)
: visitor_(visitor), stream_id_(stream_id) {}
bool VisitorDataSource::send_fin() const { return has_fin_; }
std::pair<int64_t, bool> VisitorDataSource::SelectPayloadLength(
size_t max_length) {
auto [payload_length, end_data, end_stream] =
visitor_.OnReadyToSendDataForStream(stream_id_, max_length);
has_fin_ = end_stream;
return {payload_length, end_data};
}
bool VisitorDataSource::Send(absl::string_view frame_header,
size_t payload_length) {
return visitor_.SendDataFrame(stream_id_, frame_header, payload_length);
}
TestMetadataSource::TestMetadataSource(const quiche::HttpHeaderBlock& entries)
: encoded_entries_(EncodeHeaders(entries)) {
remaining_ = encoded_entries_;
}
std::pair<int64_t, bool> TestMetadataSource::Pack(uint8_t* dest,
size_t dest_len) {
if (fail_when_packing_) {
return {-1, false};
}
const size_t copied = std::min(dest_len, remaining_.size());
std::memcpy(dest, remaining_.data(), copied);
remaining_.remove_prefix(copied);
return std::make_pair(copied, remaining_.empty());
}
namespace {
using TypeAndOptionalLength =
std::pair<spdy::SpdyFrameType, std::optional<size_t>>;
std::ostream& operator<<(
std::ostream& os,
const std::vector<TypeAndOptionalLength>& types_and_lengths) {
for (const auto& type_and_length : types_and_lengths) {
os << "(" << spdy::FrameTypeToString(type_and_length.first) << ", "
<< (type_and_length.second ? absl::StrCat(type_and_length.second.value())
: "<unspecified>")
<< ") ";
}
return os;
}
std::string FrameTypeToString(uint8_t frame_type) {
if (spdy::IsDefinedFrameType(frame_type)) {
return spdy::FrameTypeToString(spdy::ParseFrameType(frame_type));
} else {
return absl::StrFormat("0x%x", static_cast<int>(frame_type));
}
}
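// gMock matcher verifying that a buffer holds exactly the expected sequence of
// HTTP/2 frame types, optionally also checking each frame's payload length.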
class SpdyControlFrameMatcher
: public testing::MatcherInterface<absl::string_view> {
public:
explicit SpdyControlFrameMatcher(
std::vector<TypeAndOptionalLength> types_and_lengths)
: expected_types_and_lengths_(std::move(types_and_lengths)) {}
bool MatchAndExplain(absl::string_view s,
testing::MatchResultListener* listener) const override {
quiche::QuicheDataReader reader(s.data(), s.size());
for (TypeAndOptionalLength expected : expected_types_and_lengths_) {
if (!MatchAndExplainOneFrame(expected.first, expected.second, &reader,
listener)) {
return false;
}
}
if (!reader.IsDoneReading()) {
*listener << "; " << reader.BytesRemaining() << " bytes left to read!";
return false;
}
return true;
}
bool MatchAndExplainOneFrame(spdy::SpdyFrameType expected_type,
std::optional<size_t> expected_length,
quiche::QuicheDataReader* reader,
testing::MatchResultListener* listener) const {
uint32_t payload_length;
if (!reader->ReadUInt24(&payload_length)) {
*listener << "; unable to read length field for expected_type "
<< FrameTypeToString(expected_type) << ". data too short!";
return false;
}
if (expected_length && payload_length != expected_length.value()) {
*listener << "; actual length: " << payload_length
<< " but expected length: " << expected_length.value();
return false;
}
uint8_t raw_type;
if (!reader->ReadUInt8(&raw_type)) {
*listener << "; unable to read type field for expected_type "
<< FrameTypeToString(expected_type) << ". data too short!";
return false;
}
if (raw_type != static_cast<uint8_t>(expected_type)) {
*listener << "; actual type: " << FrameTypeToString(raw_type)
<< " but expected type: " << FrameTypeToString(expected_type);
return false;
}
reader->Seek(5 + payload_length);
return true;
}
void DescribeTo(std::ostream* os) const override {
*os << "Data contains frames of types in sequence "
<< expected_types_and_lengths_;
}
void DescribeNegationTo(std::ostream* os) const override {
*os << "Data does not contain frames of types in sequence "
<< expected_types_and_lengths_;
}
private:
const std::vector<TypeAndOptionalLength> expected_types_and_lengths_;
};
}
testing::Matcher<absl::string_view> EqualsFrames(
std::vector<std::pair<spdy::SpdyFrameType, std::optional<size_t>>>
types_and_lengths) {
return MakeMatcher(new SpdyControlFrameMatcher(std::move(types_and_lengths)));
}
testing::Matcher<absl::string_view> EqualsFrames(
std::vector<spdy::SpdyFrameType> types) {
std::vector<std::pair<spdy::SpdyFrameType, std::optional<size_t>>>
types_and_lengths;
types_and_lengths.reserve(types.size());
for (spdy::SpdyFrameType type : types) {
types_and_lengths.push_back({type, std::nullopt});
}
return MakeMatcher(new SpdyControlFrameMatcher(std::move(types_and_lengths)));
}
}
}
} | #include "quiche/http2/adapter/test_utils.h"
#include <optional>
#include <string>
#include <utility>
#include "quiche/http2/core/spdy_framer.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace adapter {
namespace test {
namespace {
using spdy::SpdyFramer;
TEST(EqualsFrames, Empty) {
EXPECT_THAT("", EqualsFrames(std::vector<spdy::SpdyFrameType>{}));
}
TEST(EqualsFrames, SingleFrameWithLength) {
SpdyFramer framer{SpdyFramer::ENABLE_COMPRESSION};
spdy::SpdyPingIR ping{511};
EXPECT_THAT(framer.SerializeFrame(ping),
EqualsFrames({{spdy::SpdyFrameType::PING, 8}}));
spdy::SpdyWindowUpdateIR window_update{1, 101};
EXPECT_THAT(framer.SerializeFrame(window_update),
EqualsFrames({{spdy::SpdyFrameType::WINDOW_UPDATE, 4}}));
spdy::SpdyDataIR data{3, "Some example data, ha ha!"};
EXPECT_THAT(framer.SerializeFrame(data),
EqualsFrames({{spdy::SpdyFrameType::DATA, 25}}));
}
TEST(EqualsFrames, SingleFrameWithoutLength) {
SpdyFramer framer{SpdyFramer::ENABLE_COMPRESSION};
spdy::SpdyRstStreamIR rst_stream{7, spdy::ERROR_CODE_REFUSED_STREAM};
EXPECT_THAT(framer.SerializeFrame(rst_stream),
EqualsFrames({{spdy::SpdyFrameType::RST_STREAM, std::nullopt}}));
spdy::SpdyGoAwayIR goaway{13, spdy::ERROR_CODE_ENHANCE_YOUR_CALM,
"Consider taking some deep breaths."};
EXPECT_THAT(framer.SerializeFrame(goaway),
EqualsFrames({{spdy::SpdyFrameType::GOAWAY, std::nullopt}}));
quiche::HttpHeaderBlock block;
block[":method"] = "GET";
block[":path"] = "/example";
block[":authority"] = "example.com";
spdy::SpdyHeadersIR headers{17, std::move(block)};
EXPECT_THAT(framer.SerializeFrame(headers),
EqualsFrames({{spdy::SpdyFrameType::HEADERS, std::nullopt}}));
}
TEST(EqualsFrames, MultipleFrames) {
SpdyFramer framer{SpdyFramer::ENABLE_COMPRESSION};
spdy::SpdyPingIR ping{511};
spdy::SpdyWindowUpdateIR window_update{1, 101};
spdy::SpdyDataIR data{3, "Some example data, ha ha!"};
spdy::SpdyRstStreamIR rst_stream{7, spdy::ERROR_CODE_REFUSED_STREAM};
spdy::SpdyGoAwayIR goaway{13, spdy::ERROR_CODE_ENHANCE_YOUR_CALM,
"Consider taking some deep breaths."};
quiche::HttpHeaderBlock block;
block[":method"] = "GET";
block[":path"] = "/example";
block[":authority"] = "example.com";
spdy::SpdyHeadersIR headers{17, std::move(block)};
const std::string frame_sequence =
absl::StrCat(absl::string_view(framer.SerializeFrame(ping)),
absl::string_view(framer.SerializeFrame(window_update)),
absl::string_view(framer.SerializeFrame(data)),
absl::string_view(framer.SerializeFrame(rst_stream)),
absl::string_view(framer.SerializeFrame(goaway)),
absl::string_view(framer.SerializeFrame(headers)));
absl::string_view frame_sequence_view = frame_sequence;
EXPECT_THAT(frame_sequence,
EqualsFrames({{spdy::SpdyFrameType::PING, std::nullopt},
{spdy::SpdyFrameType::WINDOW_UPDATE, std::nullopt},
{spdy::SpdyFrameType::DATA, 25},
{spdy::SpdyFrameType::RST_STREAM, std::nullopt},
{spdy::SpdyFrameType::GOAWAY, 42},
{spdy::SpdyFrameType::HEADERS, 19}}));
EXPECT_THAT(frame_sequence_view,
EqualsFrames({{spdy::SpdyFrameType::PING, std::nullopt},
{spdy::SpdyFrameType::WINDOW_UPDATE, std::nullopt},
{spdy::SpdyFrameType::DATA, 25},
{spdy::SpdyFrameType::RST_STREAM, std::nullopt},
{spdy::SpdyFrameType::GOAWAY, 42},
{spdy::SpdyFrameType::HEADERS, 19}}));
EXPECT_THAT(
frame_sequence,
EqualsFrames(
{spdy::SpdyFrameType::PING, spdy::SpdyFrameType::WINDOW_UPDATE,
spdy::SpdyFrameType::DATA, spdy::SpdyFrameType::RST_STREAM,
spdy::SpdyFrameType::GOAWAY, spdy::SpdyFrameType::HEADERS}));
EXPECT_THAT(
frame_sequence_view,
EqualsFrames(
{spdy::SpdyFrameType::PING, spdy::SpdyFrameType::WINDOW_UPDATE,
spdy::SpdyFrameType::DATA, spdy::SpdyFrameType::RST_STREAM,
spdy::SpdyFrameType::GOAWAY, spdy::SpdyFrameType::HEADERS}));
EXPECT_THAT(
frame_sequence,
testing::Not(EqualsFrames(
{spdy::SpdyFrameType::PING, spdy::SpdyFrameType::WINDOW_UPDATE,
spdy::SpdyFrameType::DATA, spdy::SpdyFrameType::RST_STREAM,
spdy::SpdyFrameType::GOAWAY})));
EXPECT_THAT(
frame_sequence_view,
testing::Not(EqualsFrames(
{spdy::SpdyFrameType::PING, spdy::SpdyFrameType::WINDOW_UPDATE,
spdy::SpdyFrameType::DATA, spdy::SpdyFrameType::RST_STREAM,
spdy::SpdyFrameType::GOAWAY})));
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/test_utils.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/test_utils_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
732356cd-3b81-4dfa-8545-9e40ed39711b | cpp | tensorflow/tensorflow | stablehlo_type_utils | tensorflow/compiler/mlir/quantization/stablehlo/utils/stablehlo_type_utils.h | tensorflow/compiler/mlir/quantization/stablehlo/utils/stablehlo_type_utils_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_UTILS_STABLEHLO_TYPE_UTILS_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_UTILS_STABLEHLO_TYPE_UTILS_H_
#include "llvm/ADT/StringRef.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/Transforms/DialectConversion.h"
#include "stablehlo/dialect/StablehloOps.h"
namespace mlir::quant::stablehlo {
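// Returns true if `op` belongs to the StableHLO dialect.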
inline bool IsStablehloOp(Operation* op) {
return op->getDialect()->getNamespace() ==
mlir::stablehlo::StablehloDialect::getDialectNamespace();
}
}
#endif | #include "tensorflow/compiler/mlir/quantization/stablehlo/utils/stablehlo_type_utils.h"
#include <gtest/gtest.h>
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "stablehlo/dialect/StablehloOps.h"
namespace mlir::quant::stablehlo {
namespace {
using ::testing::Test;
class StablehloTypeUtilsTest : public Test {
protected:
StablehloTypeUtilsTest() {
ctx_.loadDialect<mlir::stablehlo::StablehloDialect,
mlir::arith::ArithDialect, mlir::func::FuncDialect>();
}
MLIRContext ctx_;
OpBuilder builder_{&ctx_};
};
TEST_F(StablehloTypeUtilsTest, IsStablehloOpSucceedsWithStablehloOp) {
const OwningOpRef<mlir::stablehlo::ConstantOp> constant_op =
builder_.create<mlir::stablehlo::ConstantOp>(
builder_.getUnknownLoc(), builder_.getI32IntegerAttr(0));
EXPECT_TRUE(IsStablehloOp(*constant_op));
}
TEST_F(StablehloTypeUtilsTest, IsStablehloOpFailsWithArithOp) {
const OwningOpRef<mlir::arith::ConstantOp> constant_op =
builder_.create<mlir::arith::ConstantOp>(builder_.getUnknownLoc(),
builder_.getI32IntegerAttr(0));
EXPECT_FALSE(IsStablehloOp(*constant_op));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/utils/stablehlo_type_utils.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/utils/stablehlo_type_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d5254719-24c1-4a2c-931b-bfce7b9085fb | cpp | google/tensorstore | virtual_chunked | tensorstore/driver/virtual_chunked/virtual_chunked.cc | tensorstore/driver/virtual_chunked/virtual_chunked_test.cc | #include "tensorstore/virtual_chunked.h"
#include <stddef.h>
#include <algorithm>
#include <atomic>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/chunk_layout.h"
#include "tensorstore/codec_spec.h"
#include "tensorstore/context.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/chunk_cache_driver.h"
#include "tensorstore/driver/driver.h"
#include "tensorstore/driver/driver_handle.h"
#include "tensorstore/driver/driver_spec.h"
#include "tensorstore/driver/registry.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/dimension_units.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/internal/async_write_array.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/cache_pool_resource.h"
#include "tensorstore/internal/cache/chunk_cache.h"
#include "tensorstore/internal/chunk_grid_specification.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/memory.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/open_mode.h"
#include "tensorstore/open_options.h"
#include "tensorstore/rank.h"
#include "tensorstore/serialization/absl_time.h"
#include "tensorstore/serialization/std_optional.h"
#include "tensorstore/staleness_bound.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/garbage_collection.h"
#include "tensorstore/util/garbage_collection/std_optional.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace virtual_chunked {
namespace {
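// Chunk cache backed by the user-supplied read/write functions instead of a
// kvstore.  Each entry holds one chunk of the virtual view; reads and
// writebacks are forwarded to `read_function_` / `write_function_`.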
class VirtualChunkedCache : public internal::ConcreteChunkCache {
using Base = internal::ConcreteChunkCache;
public:
using Base::Base;
template <typename EntryOrNode>
void DoRead(EntryOrNode& node, AsyncCacheReadRequest request);
class Entry : public internal::ChunkCache::Entry {
public:
using OwningCache = VirtualChunkedCache;
using internal::ChunkCache::Entry::Entry;
void DoRead(AsyncCacheReadRequest request) override {
GetOwningCache(*this).DoRead(*this, std::move(request));
}
};
class TransactionNode : public internal::ChunkCache::TransactionNode {
public:
using OwningCache = VirtualChunkedCache;
using internal::ChunkCache::TransactionNode::TransactionNode;
std::atomic<bool> marked_as_terminal_{false};
absl::Status DoInitialize(
internal::OpenTransactionPtr& transaction) override {
SetReadsCommitted();
return internal::ChunkCache::TransactionNode::DoInitialize(transaction);
}
absl::Status OnModified() override {
if (!marked_as_terminal_.exchange(true, std::memory_order_acq_rel)) {
return this->MarkAsTerminal();
}
return absl::OkStatus();
}
std::string Describe() override;
void DoRead(AsyncCacheReadRequest request) override {
GetOwningCache(*this).DoRead(*this, std::move(request));
}
void Commit() override;
void InitiateWriteback(absl::Time staleness_bound);
};
Entry* DoAllocateEntry() final { return new Entry; }
size_t DoGetSizeofEntry() final { return sizeof(Entry); }
TransactionNode* DoAllocateTransactionNode(
internal::AsyncCache::Entry& entry) final {
return new TransactionNode(static_cast<Entry&>(entry));
}
std::vector<Index> grid_origin_for_read_function_;
DimensionUnitsVector dimension_units_;
std::vector<DimensionIndex> inner_order_;
ReadFunction read_function_;
WriteFunction write_function_;
Context::Resource<internal::DataCopyConcurrencyResource>
data_copy_concurrency_;
Context::Resource<internal::CachePoolResource> cache_pool_;
};
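// Computes the portion of `full_array` (one full chunk, in component order)
// that intersects the valid domain of `entry`, expressed in the external
// dimension order and offset by the grid origin used by the read/write
// functions.  Returns false if the intersection is empty.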
bool GetPermutedPartialArray(
VirtualChunkedCache::Entry& entry, ArrayView<const void> full_array,
Array<const void, dynamic_rank, offset_origin>& partial_array) {
auto& cache = static_cast<VirtualChunkedCache&>(GetOwningCache(entry));
const auto& component_spec = cache.grid().components.front();
const DimensionIndex rank = component_spec.rank();
span<const Index> cell_shape = component_spec.shape();
span<const Index> cell_indices = entry.cell_indices();
span<const DimensionIndex> inner_order = cache.inner_order_;
span<const Index> grid_origin_for_read_function =
cache.grid_origin_for_read_function_;
BoxView<> domain_bounds = component_spec.array_spec.valid_data_bounds;
partial_array.layout().set_rank(rank);
ByteStridedPointer<const void> data = full_array.byte_strided_pointer();
for (DimensionIndex component_dim = 0; component_dim < rank;
++component_dim) {
const DimensionIndex external_dim = inner_order[component_dim];
const Index byte_stride = full_array.byte_strides()[component_dim];
partial_array.byte_strides()[external_dim] = byte_stride;
Index grid_origin_value = grid_origin_for_read_function[external_dim];
Index chunk_start = cell_indices[component_dim] * cell_shape[component_dim];
Index chunk_end = chunk_start + cell_shape[component_dim];
Index request_start =
std::max(chunk_start, domain_bounds.origin()[component_dim]);
Index request_end =
std::min(chunk_end, domain_bounds[component_dim].exclusive_max());
if (request_start >= request_end) {
return false;
}
partial_array.origin()[external_dim] = request_start + grid_origin_value;
partial_array.shape()[external_dim] = request_end - request_start;
data -= internal::wrap_on_overflow::Multiply(
byte_stride, chunk_start + grid_origin_value);
}
partial_array.element_pointer() =
ElementPointer<const void>(data, full_array.dtype());
return true;
}
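// Shared read path for Entry and TransactionNode: allocates a full chunk,
// invokes the user-provided read function on the in-bounds portion, and
// converts the returned TimestampedStorageGeneration into the cache's read
// state (an unknown generation means the cached data is still current).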
template <typename EntryOrNode>
void VirtualChunkedCache::DoRead(EntryOrNode& node,
AsyncCacheReadRequest request) {
auto& cache = GetOwningCache(node);
if (!cache.read_function_) {
node.ReadError(absl::InvalidArgumentError(
"Write-only virtual chunked view requires chunk-aligned writes"));
return;
}
auto& executor = cache.executor();
executor([&node, staleness_bound = request.staleness_bound] {
auto& entry = GetOwningEntry(node);
auto& cache = GetOwningCache(entry);
const auto& component_spec = cache.grid().components.front();
span<const Index> cell_shape = component_spec.shape();
auto full_array = AllocateArray(cell_shape, c_order, default_init,
component_spec.dtype());
Array<const void, dynamic_rank, offset_origin> partial_array;
auto read_data =
tensorstore::internal::make_shared_for_overwrite<ReadData[]>(1);
if (!GetPermutedPartialArray(entry, full_array, partial_array)) {
node.ReadSuccess(
{std::move(read_data),
{StorageGeneration::NoValue(), absl::InfiniteFuture()}});
return;
}
read_data.get()[0] = full_array;
ReadParameters read_params;
read_params.executor_ = cache.executor();
{
ReadLock<ReadData> lock{node};
read_params.if_not_equal_ = lock.stamp().generation;
}
read_params.staleness_bound_ = staleness_bound;
auto read_future =
cache.read_function_(ConstDataTypeCast<void>(std::move(partial_array)),
std::move(read_params));
read_future.Force();
read_future.ExecuteWhenReady(
[&node, read_data = std::move(read_data)](
ReadyFuture<TimestampedStorageGeneration> future) mutable {
auto& r = future.result();
if (!r.ok()) {
node.ReadError(std::move(r).status());
return;
}
if (StorageGeneration::IsUnknown(r->generation)) {
ReadState read_state;
{
ReadLock<ReadData> lock{node};
read_state = lock.read_state();
}
read_state.stamp.time = r->time;
node.ReadSuccess(std::move(read_state));
return;
}
node.ReadSuccess({std::move(read_data), std::move(*r)});
return;
});
});
}
std::string VirtualChunkedCache::TransactionNode::Describe() {
auto& entry = GetOwningEntry(*this);
auto& cache = GetOwningCache(entry);
auto domain = cache.grid().GetValidCellDomain(0, entry.cell_indices());
if (domain.is_empty()) return {};
return tensorstore::StrCat("write to virtual chunk ", domain);
}
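// Writeback requires a write function; a read-only view cannot commit
// modifications.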
void VirtualChunkedCache::TransactionNode::Commit() {
if (!GetOwningCache(*this).write_function_) {
SetError(absl::InternalError(
"No write function specified to virtual_chunked driver"));
this->WritebackError();
return;
}
InitiateWriteback(absl::InfinitePast());
internal::ChunkCache::TransactionNode::Commit();
}
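// Applies the transaction's modifications to obtain the chunk contents
// (falling back to the fill value where nothing was read), then calls the
// user-provided write function.  An unknown generation in the result means
// the precondition failed, so writeback is retried with a newer staleness
// bound.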
void VirtualChunkedCache::TransactionNode::InitiateWriteback(
absl::Time staleness_bound) {
struct ApplyReceiver {
TransactionNode& self;
void set_value(AsyncCache::ReadState update) {
GetOwningCache(self).executor()(
[node = &self, update = std::move(update)] {
auto* read_data = static_cast<const ReadData*>(update.data.get());
SharedArray<const void> full_array;
auto& entry = GetOwningEntry(*node);
auto& cache = GetOwningCache(*node);
if (read_data && read_data[0].valid()) {
full_array = read_data[0];
} else {
full_array =
node->component_specs()[0].array_spec.GetFillValueForDomain(
cache.grid().GetCellDomain(0, entry.cell_indices()));
}
Array<const void, dynamic_rank, offset_origin> partial_array;
if (!GetPermutedPartialArray(entry, full_array, partial_array)) {
node->WritebackSuccess(
{std::move(update.data),
{StorageGeneration::NoValue(), absl::InfiniteFuture()}});
return;
}
WriteParameters write_params;
write_params.if_equal_ =
StorageGeneration::Clean(update.stamp.generation);
write_params.executor_ = cache.executor();
auto write_future = cache.write_function_(std::move(partial_array),
std::move(write_params));
write_future.Force();
write_future.ExecuteWhenReady(
[node = node, update = std::move(update),
full_array = std::move(full_array)](
ReadyFuture<TimestampedStorageGeneration> future) mutable {
auto& r = future.result();
if (!r.ok()) {
node->SetError(std::move(r).status());
node->WritebackError();
return;
}
if (StorageGeneration::IsUnknown(r->generation)) {
node->InitiateWriteback(r->time);
return;
}
update.stamp = std::move(*r);
node->WritebackSuccess(std::move(update));
});
});
}
void set_error(absl::Status error) {
self.SetError(std::move(error));
self.WritebackError();
}
void set_cancel() { ABSL_UNREACHABLE(); }
};
AsyncCache::TransactionNode::ApplyOptions apply_options;
apply_options.staleness_bound = staleness_bound;
this->DoApply(std::move(apply_options), ApplyReceiver{*this});
}
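// Spec for the "virtual_chunked" driver.  It carries the read/write
// functions directly, so it is registered below for serialization only.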
class VirtualChunkedDriverSpec
: public internal::RegisteredDriverSpec<VirtualChunkedDriverSpec,
internal::DriverSpec> {
public:
constexpr static const char id[] = "virtual_chunked";
std::optional<ReadFunction> read_function;
std::optional<WriteFunction> write_function;
Context::Resource<internal::DataCopyConcurrencyResource>
data_copy_concurrency;
Context::Resource<internal::CachePoolResource> cache_pool;
StalenessBound data_staleness;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(internal::BaseCast<internal::DriverSpec>(x), x.read_function,
x.write_function, x.data_copy_concurrency, x.cache_pool,
x.data_staleness);
};
OpenMode open_mode() const override {
return OpenMode::open;
}
Future<internal::Driver::Handle> Open(
internal::DriverOpenRequest request) const override;
absl::Status ApplyOptions(SpecOptions&& options) override {
if (options.kvstore.valid()) {
return absl::InvalidArgumentError(
"virtual_chunked driver does not support a kvstore");
}
if (options.recheck_cached_data.specified()) {
data_staleness = StalenessBound(options.recheck_cached_data);
}
if (options.recheck_cached_metadata.specified()) {
return absl::InvalidArgumentError(
"virtual_chunked driver does not support recheck_cached_metadata");
}
return schema.Set(static_cast<Schema&&>(options));
}
};
class VirtualChunkedDriver;
using VirtualChunkedDriverBase = internal::RegisteredDriver<
VirtualChunkedDriver,
internal::ChunkGridSpecificationDriver<
VirtualChunkedCache, internal::ChunkCacheReadWriteDriverMixin<
VirtualChunkedDriver, internal::Driver>>>;
class VirtualChunkedDriver : public VirtualChunkedDriverBase {
using Base = VirtualChunkedDriverBase;
public:
using Base::Base;
Result<internal::TransformedDriverSpec> GetBoundSpec(
internal::OpenTransactionPtr transaction,
IndexTransformView<> transform) override;
static Result<internal::Driver::Handle> OpenFromSpecData(
Transaction transaction, const VirtualChunkedDriverSpec& spec,
ReadWriteMode read_write_mode = ReadWriteMode::dynamic);
Result<CodecSpec> GetCodec() override { return CodecSpec{}; }
Result<DimensionUnitsVector> GetDimensionUnits() override {
return cache()->dimension_units_;
}
Result<SharedArray<const void>> GetFillValue(
IndexTransformView<> transform) override {
return {std::in_place};
}
Result<ChunkLayout> GetChunkLayout(IndexTransformView<> transform) override {
return internal::GetChunkLayoutFromGrid(cache()->grid().components[0]) |
transform;
}
};
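// Reconstructs a bound spec (schema constraints, chunk layout, domain, and
// index transform) from the cache state so the open driver can be
// serialized and later reopened.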
Result<internal::TransformedDriverSpec> VirtualChunkedDriver::GetBoundSpec(
internal::OpenTransactionPtr transaction, IndexTransformView<> transform) {
auto driver_spec = internal::DriverSpec::Make<VirtualChunkedDriverSpec>();
driver_spec->context_binding_state_ = ContextBindingState::bound;
auto& cache = *this->cache();
if (cache.read_function_) {
driver_spec->read_function = cache.read_function_;
}
if (cache.write_function_) {
driver_spec->write_function = cache.write_function_;
}
driver_spec->data_copy_concurrency = cache.data_copy_concurrency_;
driver_spec->cache_pool = cache.cache_pool_;
driver_spec->data_staleness = this->data_staleness_bound();
const DimensionIndex rank = this->rank();
TENSORSTORE_RETURN_IF_ERROR(driver_spec->schema.Set(RankConstraint{rank}));
TENSORSTORE_RETURN_IF_ERROR(driver_spec->schema.Set(dtype()));
TENSORSTORE_RETURN_IF_ERROR(
driver_spec->schema.Set(Schema::DimensionUnits(cache.dimension_units_)));
TENSORSTORE_RETURN_IF_ERROR(
driver_spec->schema.Set(ChunkLayout::InnerOrder(cache.inner_order_)));
TENSORSTORE_RETURN_IF_ERROR(driver_spec->schema.Set(
ChunkLayout::GridOrigin(cache.grid_origin_for_read_function_)));
span<const DimensionIndex> inner_order = cache.inner_order_;
span<const Index> grid_origin_for_read_function =
cache.grid_origin_for_read_function_;
const auto& component_spec = cache.grid().components[component_index()];
IndexTransformBuilder external_to_output_transform_builder(rank, rank);
IndexDomainBuilder external_domain_builder(rank);
Index chunk_shape[kMaxRank];
for (DimensionIndex component_dim = 0; component_dim < rank;
++component_dim) {
const DimensionIndex external_dim = inner_order[component_dim];
const Index offset = grid_origin_for_read_function[external_dim];
chunk_shape[external_dim] = component_spec.shape()[component_dim];
external_to_output_transform_builder.output_single_input_dimension(
external_dim, offset, 1, component_dim);
TENSORSTORE_ASSIGN_OR_RETURN(
external_domain_builder.bounds()[external_dim],
ShiftInterval(
component_spec.array_spec.valid_data_bounds[component_dim],
offset));
}
TENSORSTORE_ASSIGN_OR_RETURN(auto external_to_output_transform,
external_to_output_transform_builder.Finalize());
TENSORSTORE_ASSIGN_OR_RETURN(auto external_domain,
external_domain_builder.Finalize());
TENSORSTORE_RETURN_IF_ERROR(driver_spec->schema.Set(
ChunkLayout::ChunkShape(span<const Index>(&chunk_shape[0], rank))));
TENSORSTORE_RETURN_IF_ERROR(
driver_spec->schema.Set(std::move(external_domain)));
internal::TransformedDriverSpec spec;
TENSORSTORE_ASSIGN_OR_RETURN(
spec.transform,
ComposeTransforms(external_to_output_transform, transform));
spec.driver_spec = std::move(driver_spec);
return spec;
}
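// Opens a driver from the spec: validates rank and dtype, chooses a
// read/write chunk grid for the schema, derives the inner order and the
// per-component domain, builds the cache, and returns a handle whose
// transform maps external coordinates to component coordinates.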
Result<internal::Driver::Handle> VirtualChunkedDriver::OpenFromSpecData(
Transaction transaction, const VirtualChunkedDriverSpec& spec,
ReadWriteMode read_write_mode) {
if ((read_write_mode & ReadWriteMode::read) == ReadWriteMode::read &&
!spec.read_function) {
return absl::InvalidArgumentError("Reading not supported");
}
if ((read_write_mode & ReadWriteMode::write) == ReadWriteMode::write &&
!spec.write_function) {
return absl::InvalidArgumentError("Writing not supported");
}
if (read_write_mode == ReadWriteMode::dynamic) {
read_write_mode =
(spec.read_function ? ReadWriteMode::read : ReadWriteMode{}) |
(spec.write_function ? ReadWriteMode::write : ReadWriteMode{});
}
const DimensionIndex rank = spec.schema.rank();
if (rank == dynamic_rank) {
return absl::InvalidArgumentError("rank must be specified");
}
DataType dtype = spec.schema.dtype();
if (!dtype.valid()) {
return absl::InvalidArgumentError("dtype must be specified");
}
IndexDomain<> domain = spec.schema.domain();
if (!domain.valid()) {
domain = IndexDomain<>(rank);
}
  domain = WithImplicitDimensions(std::move(domain), false, false);
Box<> chunk_template(rank);
std::vector<DimensionIndex> inner_order(rank);
{
ChunkLayout chunk_layout = spec.schema.chunk_layout();
if (chunk_layout.codec_chunk_shape().hard_constraint) {
return absl::InvalidArgumentError("codec_chunk_shape not supported");
}
if (spec.schema.fill_value().valid()) {
return absl::InvalidArgumentError("fill_value not supported");
}
TENSORSTORE_RETURN_IF_ERROR(
internal::ChooseReadWriteChunkGrid(chunk_layout, domain.box(),
chunk_template),
tensorstore::MaybeAnnotateStatus(_, "Failed to compute chunk grid"));
if (auto requested_inner_order = chunk_layout.inner_order();
requested_inner_order.valid()) {
std::copy_n(requested_inner_order.begin(), rank, inner_order.begin());
} else {
std::iota(inner_order.begin(), inner_order.end(), DimensionIndex(0));
}
}
auto external_dimension_units = spec.schema.dimension_units();
Box<> adjusted_component_domain(rank);
DimensionUnitsVector component_units(rank);
for (DimensionIndex component_dim = 0; component_dim < rank;
++component_dim) {
const DimensionIndex external_dim = inner_order[component_dim];
TENSORSTORE_ASSIGN_OR_RETURN(
adjusted_component_domain[component_dim],
ShiftIntervalBackward(domain[external_dim],
chunk_template.origin()[external_dim]));
if (external_dimension_units.valid()) {
component_units[component_dim] = external_dimension_units[external_dim];
}
}
internal::Driver::Handle handle;
handle.transaction = std::move(transaction);
{
IndexTransformBuilder transform_builder(rank, rank);
transform_builder.input_domain(domain);
for (DimensionIndex component_dim = 0; component_dim < rank;
++component_dim) {
const DimensionIndex external_dim = inner_order[component_dim];
transform_builder.output_single_input_dimension(
component_dim, -chunk_template.origin()[external_dim], 1,
external_dim);
}
TENSORSTORE_ASSIGN_OR_RETURN(handle.transform,
transform_builder.Finalize());
}
auto cache =
internal::GetCache<VirtualChunkedCache>(spec.cache_pool->get(), "", [&] {
auto fill_value =
BroadcastArray(AllocateArray(span<const Index>{}, c_order,
value_init, spec.schema.dtype()),
BoxView<>(rank))
.value();
std::vector<Index> chunk_shape(rank);
for (DimensionIndex component_dim = 0; component_dim < rank;
++component_dim) {
const DimensionIndex external_dim = inner_order[component_dim];
chunk_shape[component_dim] = chunk_template.shape()[external_dim];
}
internal::ChunkGridSpecification::ComponentList components;
components.emplace_back(
internal::AsyncWriteArray::Spec{
std::move(fill_value), std::move(adjusted_component_domain)},
std::move(chunk_shape));
auto cache = std::make_unique<VirtualChunkedCache>(
internal::ChunkGridSpecification(std::move(components)),
spec.data_copy_concurrency->executor);
cache->dimension_units_ = std::move(component_units);
if (spec.read_function) {
cache->read_function_ = *spec.read_function;
}
if (spec.write_function) {
cache->write_function_ = *spec.write_function;
}
cache->inner_order_ = std::move(inner_order);
cache->grid_origin_for_read_function_.assign(
chunk_template.origin().begin(), chunk_template.origin().end());
cache->cache_pool_ = spec.cache_pool;
cache->data_copy_concurrency_ = spec.data_copy_concurrency;
return cache;
});
handle.driver = internal::MakeReadWritePtr<VirtualChunkedDriver>(
read_write_mode, VirtualChunkedDriver::Initializer{
std::move(cache), 0,
spec.data_staleness.BoundAtOpen(absl::Now())});
return handle;
}
Future<internal::Driver::Handle> VirtualChunkedDriverSpec::Open(
internal::DriverOpenRequest request) const {
return VirtualChunkedDriver::OpenFromSpecData(
internal::TransactionState::ToTransaction(std::move(request.transaction)),
*this, request.read_write_mode);
}
}
namespace internal_virtual_chunked {
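// Builds a VirtualChunkedDriverSpec from the supplied read/write functions
// and options, then opens it.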
Result<internal::Driver::Handle> MakeDriver(
virtual_chunked::ReadFunction read_function,
virtual_chunked::WriteFunction write_function, OpenOptions&& options) {
VirtualChunkedDriverSpec spec;
if (read_function) {
spec.read_function = std::move(read_function);
}
if (write_function) {
spec.write_function = std::move(write_function);
}
spec.schema = static_cast<Schema&&>(options);
if (!options.context) {
options.context = Context::Default();
}
TENSORSTORE_ASSIGN_OR_RETURN(
spec.cache_pool,
options.context.GetResource<internal::CachePoolResource>());
TENSORSTORE_ASSIGN_OR_RETURN(
spec.data_copy_concurrency,
options.context.GetResource<internal::DataCopyConcurrencyResource>());
if (options.recheck_cached_data.specified()) {
spec.data_staleness = StalenessBound(options.recheck_cached_data);
}
return VirtualChunkedDriver::OpenFromSpecData(std::move(options.transaction),
spec);
}
}
}
namespace garbage_collection {
template <>
struct GarbageCollection<virtual_chunked::VirtualChunkedDriver> {
static void Visit(GarbageCollectionVisitor& visitor,
const virtual_chunked::VirtualChunkedDriver& value) {
garbage_collection::GarbageCollectionVisit(visitor,
value.cache()->read_function_);
garbage_collection::GarbageCollectionVisit(visitor,
value.cache()->write_function_);
}
};
}
}
namespace {
const tensorstore::internal::SerializationOnlyDriverRegistration<
tensorstore::virtual_chunked::VirtualChunkedDriverSpec>
driver_registration;
} | #include "tensorstore/virtual_chunked.h"
#include <memory>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/array.h"
#include "tensorstore/chunk_layout.h"
#include "tensorstore/context.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/internal/queue_testutil.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/open_mode.h"
#include "tensorstore/rank.h"
#include "tensorstore/schema.h"
#include "tensorstore/serialization/function.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/staleness_bound.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/tensorstore.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/iterate_over_index_range.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::dynamic_rank;
using ::tensorstore::Future;
using ::tensorstore::Index;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Promise;
using ::tensorstore::Result;
using ::tensorstore::span;
using ::tensorstore::StorageGeneration;
using ::tensorstore::TimestampedStorageGeneration;
using ::tensorstore::internal::ConcurrentQueue;
using ::tensorstore::internal::UniqueNow;
using ::tensorstore::serialization::SerializationRoundTrip;
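// Creates a read-only virtual view whose elements are the index of each
// position along dimension `dim`.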
template <typename... Option>
Result<tensorstore::TensorStore<Index, dynamic_rank,
tensorstore::ReadWriteMode::read>>
CoordinatesView(DimensionIndex dim, Option&&... option) {
return tensorstore::VirtualChunked<Index>(
tensorstore::NonSerializable{[dim](auto output, auto read_params)
-> Future<TimestampedStorageGeneration> {
tensorstore::IterateOverIndexRange(
output.domain(),
[&](span<const Index> indices) { output(indices) = indices[dim]; });
return TimestampedStorageGeneration{StorageGeneration::FromString(""),
absl::Now()};
}},
std::forward<Option>(option)...);
}
template <typename... Option>
Result<tensorstore::TensorStore<Index, dynamic_rank,
tensorstore::ReadWriteMode::read>>
SerializableCoordinatesView(DimensionIndex dim, Option&&... option) {
return tensorstore::VirtualChunked<Index>(
tensorstore::serialization::BindFront(
[](DimensionIndex dim, auto output,
auto read_params) -> Future<TimestampedStorageGeneration> {
tensorstore::IterateOverIndexRange(output.domain(),
[&](span<const Index> indices) {
output(indices) = indices[dim];
});
return TimestampedStorageGeneration{
StorageGeneration::FromString(""), absl::Now()};
},
dim),
std::forward<Option>(option)...);
}
using RequestLayout =
::tensorstore::StridedLayout<dynamic_rank, ::tensorstore::offset_origin>;
template <typename... Option>
Result<tensorstore::TensorStore<void, dynamic_rank,
tensorstore::ReadWriteMode::read>>
LoggingView(std::vector<RequestLayout>& requests, Option&&... option) {
auto mutex = std::make_shared<absl::Mutex>();
return tensorstore::VirtualChunked(
tensorstore::NonSerializable{
[mutex, &requests](auto output, auto read_params)
-> Future<TimestampedStorageGeneration> {
tensorstore::InitializeArray(output);
absl::MutexLock lock(mutex.get());
requests.emplace_back(output.layout());
return TimestampedStorageGeneration{
StorageGeneration::FromString(""), absl::Now()};
}},
std::forward<Option>(option)...);
}
template <typename Element, DimensionIndex Rank, typename Parameters>
struct Request {
tensorstore::Array<Element, Rank, tensorstore::offset_origin> array;
Parameters params;
Promise<TimestampedStorageGeneration> promise;
};
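// Returns a chunk request handler that records each request on `queue` and
// lets the test complete it through the captured promise.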
template <typename Element, DimensionIndex Rank, typename Parameters>
auto EnqueueRequestHandler(
ConcurrentQueue<Request<Element, Rank, Parameters>>& queue) {
return tensorstore::NonSerializable{
[&queue](
tensorstore::Array<Element, Rank, tensorstore::offset_origin> array,
Parameters params) -> Future<TimestampedStorageGeneration> {
auto [promise, future] = tensorstore::PromiseFuturePair<
TimestampedStorageGeneration>::Make();
queue.push({std::move(array), std::move(params), std::move(promise)});
return future;
}};
}
template <typename Element, DimensionIndex Rank>
using ReadRequest =
Request<Element, Rank, tensorstore::virtual_chunked::ReadParameters>;
template <typename Element, DimensionIndex Rank>
using WriteRequest =
Request<const Element, Rank, tensorstore::virtual_chunked::WriteParameters>;
template <typename Element, DimensionIndex Rank, typename... Option>
Result<
tensorstore::TensorStore<Element, Rank, tensorstore::ReadWriteMode::read>>
MockView(ConcurrentQueue<ReadRequest<Element, Rank>>& queue,
Option&&... option) {
return tensorstore::VirtualChunked<Element, Rank>(
EnqueueRequestHandler(queue), std::forward<Option>(option)...);
}
template <typename Element, DimensionIndex Rank, typename... Option>
Result<tensorstore::TensorStore<Element, Rank,
tensorstore::ReadWriteMode::read_write>>
MockView(ConcurrentQueue<ReadRequest<Element, Rank>>& read_queue,
ConcurrentQueue<WriteRequest<Element, Rank>>& write_queue,
Option&&... option) {
return tensorstore::VirtualChunked<Element, Rank>(
EnqueueRequestHandler(read_queue), EnqueueRequestHandler(write_queue),
std::forward<Option>(option)...);
}
template <typename Element, DimensionIndex Rank, typename... Option>
Result<
tensorstore::TensorStore<Element, Rank, tensorstore::ReadWriteMode::write>>
MockView(ConcurrentQueue<WriteRequest<Element, Rank>>& write_queue,
Option&&... option) {
return tensorstore::VirtualChunkedWriteOnly<Element, Rank>(
EnqueueRequestHandler(write_queue), std::forward<Option>(option)...);
}
TEST(VirtualChunkedTest, Coordinates) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto coords0, CoordinatesView(0, tensorstore::Schema::Shape({2, 3})));
EXPECT_THAT(tensorstore::Read(coords0).result(),
::testing::Optional(
tensorstore::MakeArray<Index>({{0, 0, 0}, {1, 1, 1}})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto coords1, CoordinatesView(1, tensorstore::Schema::Shape({2, 3})));
EXPECT_THAT(tensorstore::Read(coords1).result(),
::testing::Optional(
tensorstore::MakeArray<Index>({{0, 1, 2}, {0, 1, 2}})));
}
TEST(VirtualChunkedTest, CoordinatesUnbounded) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto coords0, CoordinatesView(0, tensorstore::RankConstraint{2}));
EXPECT_THAT(
tensorstore::Read<tensorstore::zero_origin>(
coords0 | tensorstore::Dims(0, 1).SizedInterval({1000, 2}, {2, 3}))
.result(),
::testing::Optional(tensorstore::MakeArray<Index>(
{{1000, 1000, 1000}, {1001, 1001, 1001}})));
}
TEST(VirtualChunkedTest, CoordinatesInnerOrder) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto coords0,
CoordinatesView(0, tensorstore::Schema::Shape({2, 3}),
tensorstore::ChunkLayout::InnerOrder({1, 0})));
EXPECT_THAT(tensorstore::Read(coords0).result(),
::testing::Optional(
tensorstore::MakeArray<Index>({{0, 0, 0}, {1, 1, 1}})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto coords1,
CoordinatesView(1, tensorstore::Schema::Shape({2, 3}),
tensorstore::ChunkLayout::InnerOrder({1, 0})));
EXPECT_THAT(tensorstore::Read(coords1).result(),
::testing::Optional(
tensorstore::MakeArray<Index>({{0, 1, 2}, {0, 1, 2}})));
}
TEST(VirtualChunkedTest, SerializableCoordinatesInnerOrder) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto coords0_orig, SerializableCoordinatesView(
0, tensorstore::Schema::Shape({2, 3}),
tensorstore::ChunkLayout::InnerOrder({1, 0})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto coords0,
SerializationRoundTrip(coords0_orig));
EXPECT_THAT(tensorstore::Read(coords0).result(),
::testing::Optional(
tensorstore::MakeArray<Index>({{0, 0, 0}, {1, 1, 1}})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto coords1_orig, SerializableCoordinatesView(
1, tensorstore::Schema::Shape({2, 3}),
tensorstore::ChunkLayout::InnerOrder({1, 0})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto coords1,
SerializationRoundTrip(coords1_orig));
EXPECT_THAT(tensorstore::Read(coords1).result(),
::testing::Optional(
tensorstore::MakeArray<Index>({{0, 1, 2}, {0, 1, 2}})));
}
TEST(VirtualChunkedTest, ReadChunkShape) {
std::vector<RequestLayout> requests;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto view, LoggingView(requests, tensorstore::dtype_v<bool>,
tensorstore::Schema::Shape({2, 3}),
tensorstore::ChunkLayout::ReadChunkShape({2, 1})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto chunk_layout, view.chunk_layout());
EXPECT_THAT(chunk_layout.read_chunk_shape(), ::testing::ElementsAre(2, 1));
TENSORSTORE_ASSERT_OK(tensorstore::Read(view));
EXPECT_THAT(requests, ::testing::UnorderedElementsAre(
RequestLayout({0, 0}, {2, 1}, {1, 1}),
RequestLayout({0, 1}, {2, 1}, {1, 1}),
RequestLayout({0, 2}, {2, 1}, {1, 1})));
}
TEST(VirtualChunkedTest, InnerOrder) {
std::vector<RequestLayout> requests;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto view,
LoggingView(requests, tensorstore::dtype_v<bool>,
tensorstore::Schema::Shape({3, 4, 5}),
tensorstore::ChunkLayout::InnerOrder({2, 0, 1}),
tensorstore::ChunkLayout::ReadChunkShape({2, 3, 4})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto chunk_layout, view.chunk_layout());
EXPECT_THAT(chunk_layout.read_chunk_shape(), ::testing::ElementsAre(2, 3, 4));
EXPECT_THAT(chunk_layout.inner_order(), ::testing::ElementsAre(2, 0, 1));
TENSORSTORE_ASSERT_OK(tensorstore::Read(view));
EXPECT_THAT(requests, ::testing::UnorderedElementsAreArray({
RequestLayout({0, 0, 0}, {2, 3, 4}, {3, 1, 6}),
RequestLayout({2, 0, 0}, {1, 3, 4}, {3, 1, 6}),
RequestLayout({0, 3, 0}, {2, 1, 4}, {3, 1, 6}),
RequestLayout({2, 3, 0}, {1, 1, 4}, {3, 1, 6}),
RequestLayout({0, 0, 4}, {2, 3, 1}, {3, 1, 6}),
RequestLayout({2, 0, 4}, {1, 3, 1}, {3, 1, 6}),
RequestLayout({0, 3, 4}, {2, 1, 1}, {3, 1, 6}),
RequestLayout({2, 3, 4}, {1, 1, 1}, {3, 1, 6}),
}));
}
TEST(VirtualChunkedTest, NoRecheckCache) {
ConcurrentQueue<ReadRequest<int, 0>> requests;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto context, tensorstore::Context::FromJson(
{{"cache_pool", {{"total_bytes_limit", 10000000}}}}));
auto mock_view = MockView<int, 0>(
requests, tensorstore::RecheckCachedData{false}, context);
auto read_future = tensorstore::Read(mock_view);
{
auto request = requests.pop();
EXPECT_EQ(StorageGeneration::Unknown(), request.params.if_not_equal());
request.array() = 42;
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString("abc"), absl::Now()));
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeScalarArray<int>(42)));
read_future = tensorstore::Read(mock_view);
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeScalarArray<int>(42)));
}
TEST(VirtualChunkedTest, RecheckCache) {
ConcurrentQueue<ReadRequest<int, 0>> requests;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto context, tensorstore::Context::FromJson(
{{"cache_pool", {{"total_bytes_limit", 10000000}}}}));
auto mock_view = MockView<int, 0>(requests, context);
auto read_future = tensorstore::Read(mock_view);
{
auto request = requests.pop();
EXPECT_EQ(StorageGeneration::Unknown(), request.params.if_not_equal());
request.array() = 42;
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString("abc"), absl::Now()));
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeScalarArray<int>(42)));
UniqueNow();
read_future = tensorstore::Read(mock_view);
{
auto request = requests.pop();
EXPECT_EQ(StorageGeneration::FromString("abc"),
request.params.if_not_equal());
request.array() = 43;
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::Unknown(), absl::Now()));
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeScalarArray<int>(42)));
}
TEST(VirtualChunkedTest, RecheckCacheImmutable) {
ConcurrentQueue<ReadRequest<int, 0>> requests;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto context, tensorstore::Context::FromJson(
{{"cache_pool", {{"total_bytes_limit", 10000000}}}}));
auto mock_view =
MockView<int, 0>(requests, tensorstore::RecheckCachedData{true}, context);
auto read_future = tensorstore::Read(mock_view);
{
auto request = requests.pop();
EXPECT_EQ(StorageGeneration::Unknown(), request.params.if_not_equal());
request.array() = 42;
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString(""), absl::InfiniteFuture()));
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeScalarArray<int>(42)));
UniqueNow();
read_future = tensorstore::Read(mock_view);
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeScalarArray<int>(42)));
}
TEST(VirtualChunkedTest, ReadWrite) {
ConcurrentQueue<ReadRequest<int, 1>> read_requests;
ConcurrentQueue<WriteRequest<int, 1>> write_requests;
auto mock_view = MockView<int, 1>(read_requests, write_requests,
tensorstore::Schema::Shape({2}));
auto write_future =
tensorstore::Write(tensorstore::MakeScalarArray<int>(42),
mock_view | tensorstore::Dims(0).IndexSlice(0));
write_future.Force();
{
auto request = read_requests.pop();
EXPECT_EQ(StorageGeneration::Unknown(), request.params.if_not_equal());
request.array(0) = 1;
request.array(1) = 2;
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString("gen1"), absl::Now()));
}
{
auto request = write_requests.pop();
EXPECT_EQ(StorageGeneration::FromString("gen1"), request.params.if_equal());
EXPECT_EQ(tensorstore::MakeArray<int>({42, 2}), request.array);
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString("gen2"), absl::Now()));
}
TENSORSTORE_ASSERT_OK(write_future);
}
TEST(VirtualChunkedTest, ReadWriteWrite) {
ConcurrentQueue<ReadRequest<int, 1>> read_requests;
ConcurrentQueue<WriteRequest<int, 1>> write_requests;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto context, tensorstore::Context::FromJson(
{{"cache_pool", {{"total_bytes_limit", 1000000}}}}));
auto mock_view = MockView<int, 1>(read_requests, write_requests, context,
tensorstore::Schema::Shape({2}));
{
auto write_future =
tensorstore::Write(tensorstore::MakeScalarArray<int>(42),
mock_view | tensorstore::Dims(0).IndexSlice(0));
write_future.Force();
{
auto request = read_requests.pop();
EXPECT_EQ(StorageGeneration::Unknown(), request.params.if_not_equal());
request.array(0) = 1;
request.array(1) = 2;
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString(""), absl::InfiniteFuture()));
}
{
auto request = write_requests.pop();
EXPECT_EQ(StorageGeneration::FromString(""), request.params.if_equal());
EXPECT_EQ(tensorstore::MakeArray<int>({42, 2}), request.array);
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString(""), absl::InfiniteFuture()));
}
TENSORSTORE_ASSERT_OK(write_future);
}
{
auto write_future =
tensorstore::Write(tensorstore::MakeScalarArray<int>(50),
mock_view | tensorstore::Dims(0).IndexSlice(1));
write_future.Force();
{
auto request = write_requests.pop();
EXPECT_EQ(StorageGeneration::FromString(""), request.params.if_equal());
EXPECT_EQ(tensorstore::MakeArray<int>({42, 50}), request.array);
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString(""), absl::InfiniteFuture()));
}
TENSORSTORE_ASSERT_OK(write_future);
}
}
TEST(VirtualChunkedTest, Write) {
ConcurrentQueue<WriteRequest<int, 1>> write_requests;
auto mock_view =
MockView<int, 1>(write_requests, tensorstore::Schema::Shape({6}),
tensorstore::ChunkLayout::ChunkShape({4}));
{
auto write_future = tensorstore::Write(
tensorstore::MakeScalarArray<int>(42),
mock_view | tensorstore::Dims(0).SizedInterval(0, 4));
write_future.Force();
{
auto request = write_requests.pop();
EXPECT_EQ(StorageGeneration::Unknown(), request.params.if_equal());
EXPECT_EQ(tensorstore::MakeArray<int>({42, 42, 42, 42}), request.array);
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString(""), absl::Now()));
}
TENSORSTORE_ASSERT_OK(write_future);
}
{
auto write_future = tensorstore::Write(
tensorstore::MakeScalarArray<int>(42),
mock_view | tensorstore::Dims(0).SizedInterval(4, 2));
write_future.Force();
{
auto request = write_requests.pop();
EXPECT_EQ(StorageGeneration::Unknown(), request.params.if_equal());
EXPECT_EQ(tensorstore::MakeOffsetArray<int>({4}, {42, 42}),
request.array);
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString(""), absl::Now()));
}
TENSORSTORE_ASSERT_OK(write_future);
}
}
TEST(VirtualChunkedTest, WriteFillValue) {
ConcurrentQueue<WriteRequest<int, 0>> write_requests;
auto mock_view = MockView<int, 0>(write_requests);
auto write_future =
tensorstore::Write(tensorstore::MakeScalarArray<int>(0), mock_view);
write_future.Force();
{
auto request = write_requests.pop();
EXPECT_EQ(StorageGeneration::Unknown(), request.params.if_equal());
EXPECT_EQ(tensorstore::MakeScalarArray<int>(0), request.array);
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString(""), absl::Now()));
}
TENSORSTORE_ASSERT_OK(write_future);
}
TEST(VirtualChunkedTest, WriteOnlyError) {
ConcurrentQueue<WriteRequest<int, 1>> write_requests;
auto mock_view =
MockView<int, 1>(write_requests, tensorstore::Schema::Shape({2}));
EXPECT_THAT(
tensorstore::Write(tensorstore::MakeScalarArray<int>(42),
mock_view | tensorstore::Dims(0).IndexSlice(0))
.result(),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Write-only virtual chunked view requires chunk-aligned writes"));
}
TEST(VirtualChunkedTest, AtomicSingleChunk) {
tensorstore::Transaction transaction(tensorstore::atomic_isolated);
ConcurrentQueue<WriteRequest<int, 1>> write_requests;
auto mock_view =
MockView<int, 1>(write_requests, tensorstore::Schema::Shape({6}),
tensorstore::ChunkLayout::ChunkShape({4}), transaction);
TENSORSTORE_ASSERT_OK(tensorstore::Write(
tensorstore::MakeScalarArray<int>(42),
mock_view | tensorstore::Dims(0).HalfOpenInterval(0, 4)));
auto future = transaction.CommitAsync();
{
auto request = write_requests.pop();
EXPECT_EQ(StorageGeneration::Unknown(), request.params.if_equal());
EXPECT_EQ(tensorstore::MakeArray<int>({42, 42, 42, 42}), request.array);
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString(""), absl::Now()));
}
TENSORSTORE_ASSERT_OK(future);
}
TEST(VirtualChunkedTest, AtomicMultipleChunks) {
tensorstore::Transaction transaction(tensorstore::atomic_isolated);
ConcurrentQueue<WriteRequest<int, 1>> write_requests;
auto mock_view =
MockView<int, 1>(write_requests, tensorstore::Schema::Shape({6}),
tensorstore::ChunkLayout::ChunkShape({4}), transaction);
EXPECT_THAT(
tensorstore::Write(tensorstore::MakeScalarArray<int>(42), mock_view)
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot write to virtual chunk .* and write to virtual "
"chunk .* as single atomic transaction"));
}
TEST(VirtualChunkedTest, NonAtomicSingleChunk) {
tensorstore::Transaction transaction(tensorstore::isolated);
ConcurrentQueue<WriteRequest<int, 1>> write_requests;
auto mock_view =
MockView<int, 1>(write_requests, tensorstore::Schema::Shape({6}),
tensorstore::ChunkLayout::ChunkShape({4}), transaction);
TENSORSTORE_ASSERT_OK(
tensorstore::Write(tensorstore::MakeScalarArray<int>(42), mock_view));
auto future = transaction.CommitAsync();
for (int i = 0; i < 2; ++i) {
auto request = write_requests.pop();
EXPECT_EQ(StorageGeneration::Unknown(), request.params.if_equal());
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString(""), absl::Now()));
}
TENSORSTORE_ASSERT_OK(future);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/virtual_chunked/virtual_chunked.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/virtual_chunked/virtual_chunked_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
7d164151-1d9b-4732-8ebc-8497172ea862 | cpp | tensorflow/tensorflow | memmapped_file_system | tensorflow/core/util/memmapped_file_system.cc | tensorflow/core/util/memmapped_file_system_test.cc | #include "tensorflow/core/util/memmapped_file_system.h"
#include <algorithm>
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/util/memmapped_file_system.pb.h"
namespace tensorflow {
namespace {
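// Decodes an 8-byte little-endian value; the package format stores the
// directory offset in this encoding at the very end of the file.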
uint64 DecodeUint64LittleEndian(const uint8* buffer) {
uint64 result = 0;
for (int i = 0; i < static_cast<int>(sizeof(uint64)); ++i) {
result |= static_cast<uint64>(buffer[i]) << (8 * i);
}
return result;
}
}
namespace {
class ReadOnlyMemoryRegionFromMemmapped : public ReadOnlyMemoryRegion {
public:
ReadOnlyMemoryRegionFromMemmapped(const void* data, uint64 length)
: data_(data), length_(length) {}
~ReadOnlyMemoryRegionFromMemmapped() override = default;
const void* data() override { return data_; }
uint64 length() override { return length_; }
private:
const void* const data_;
const uint64 length_;
};
class RandomAccessFileFromMemmapped : public RandomAccessFile {
public:
RandomAccessFileFromMemmapped(const void* data, uint64 length)
: data_(data), length_(length) {}
~RandomAccessFileFromMemmapped() override = default;
Status Name(StringPiece* result) const override {
return errors::Unimplemented(
"RandomAccessFileFromMemmapped does not support Name()");
}
Status Read(uint64 offset, size_t to_read, StringPiece* result,
char* scratch) const override {
if (offset >= length_) {
*result = StringPiece(scratch, 0);
return Status(absl::StatusCode::kOutOfRange, "Read after file end");
}
const uint64 region_left =
std::min(length_ - offset, static_cast<uint64>(to_read));
*result =
StringPiece(reinterpret_cast<const char*>(data_) + offset, region_left);
return (region_left == to_read) ? absl::OkStatus()
: Status(absl::StatusCode::kOutOfRange,
"Read less bytes than requested");
}
private:
const void* const data_;
const uint64 length_;
};
}
MemmappedFileSystem::MemmappedFileSystem() = default;
Status MemmappedFileSystem::FileExists(const string& fname,
TransactionToken* token) {
if (!mapped_memory_) {
return errors::FailedPrecondition("MemmappedEnv is not initialized");
}
const auto dir_element = directory_.find(fname);
if (dir_element != directory_.end()) {
return absl::OkStatus();
}
return errors::NotFound(fname, " not found");
}
Status MemmappedFileSystem::NewRandomAccessFile(
const string& filename, TransactionToken* token,
std::unique_ptr<RandomAccessFile>* result) {
if (!mapped_memory_) {
return errors::FailedPrecondition("MemmappedEnv is not initialized");
}
const auto dir_element = directory_.find(filename);
if (dir_element == directory_.end()) {
return errors::NotFound("Region ", filename, " is not found");
}
*result = std::make_unique<RandomAccessFileFromMemmapped>(
GetMemoryWithOffset(dir_element->second.offset),
dir_element->second.length);
return absl::OkStatus();
}
Status MemmappedFileSystem::NewReadOnlyMemoryRegionFromFile(
const string& filename, TransactionToken* token,
std::unique_ptr<ReadOnlyMemoryRegion>* result) {
if (!mapped_memory_) {
return errors::FailedPrecondition("MemmappedEnv is not initialized");
}
const auto dir_element = directory_.find(filename);
if (dir_element == directory_.end()) {
return errors::NotFound("Region ", filename, " is not found");
}
*result = std::make_unique<ReadOnlyMemoryRegionFromMemmapped>(
GetMemoryWithOffset(dir_element->second.offset),
dir_element->second.length);
return absl::OkStatus();
}
Status MemmappedFileSystem::GetFileSize(const string& filename,
TransactionToken* token, uint64* size) {
if (!mapped_memory_) {
return errors::FailedPrecondition("MemmappedEnv is not initialized");
}
const auto dir_element = directory_.find(filename);
if (dir_element == directory_.end()) {
return errors::NotFound("Region ", filename, " is not found");
}
*size = dir_element->second.length;
return absl::OkStatus();
}
Status MemmappedFileSystem::Stat(const string& fname, TransactionToken* token,
FileStatistics* stat) {
uint64 size;
auto status = GetFileSize(fname, token, &size);
if (status.ok()) {
stat->length = size;
}
return status;
}
Status MemmappedFileSystem::NewWritableFile(const string& filename,
TransactionToken* token,
std::unique_ptr<WritableFile>* wf) {
return errors::Unimplemented("memmapped format doesn't support writing");
}
Status MemmappedFileSystem::NewAppendableFile(
const string& filename, TransactionToken* token,
std::unique_ptr<WritableFile>* result) {
return errors::Unimplemented("memmapped format doesn't support writing");
}
Status MemmappedFileSystem::GetChildren(const string& filename,
TransactionToken* token,
std::vector<string>* strings) {
return errors::Unimplemented("memmapped format doesn't support GetChildren");
}
Status MemmappedFileSystem::GetMatchingPaths(const string& pattern,
TransactionToken* token,
std::vector<string>* results) {
return errors::Unimplemented(
"memmapped format doesn't support GetMatchingPaths");
}
Status MemmappedFileSystem::DeleteFile(const string& filename,
TransactionToken* token) {
return errors::Unimplemented("memmapped format doesn't support DeleteFile");
}
Status MemmappedFileSystem::CreateDir(const string& dirname,
TransactionToken* token) {
return errors::Unimplemented("memmapped format doesn't support CreateDir");
}
Status MemmappedFileSystem::DeleteDir(const string& dirname,
TransactionToken* token) {
return errors::Unimplemented("memmapped format doesn't support DeleteDir");
}
Status MemmappedFileSystem::RenameFile(const string& filename_from,
const string& filename_to,
TransactionToken* token) {
return errors::Unimplemented("memmapped format doesn't support RenameFile");
}
const void* MemmappedFileSystem::GetMemoryWithOffset(uint64 offset) const {
return reinterpret_cast<const uint8*>(mapped_memory_->data()) + offset;
}
constexpr const char MemmappedFileSystem::kMemmappedPackagePrefix[];
constexpr const char MemmappedFileSystem::kMemmappedPackageDefaultGraphDef[];
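// A memmapped package consists of the raw region contents, followed by a
// serialized MemmappedFileSystemDirectory proto, followed by an 8-byte
// little-endian offset of that directory.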
Status MemmappedFileSystem::InitializeFromFile(Env* env,
const string& filename) {
TF_RETURN_IF_ERROR(
env->NewReadOnlyMemoryRegionFromFile(filename, &mapped_memory_));
directory_.clear();
if (mapped_memory_->length() <= sizeof(uint64)) {
return errors::DataLoss("Corrupted memmapped model file: ", filename,
" Invalid package size");
}
const auto memory_start =
reinterpret_cast<const uint8*>(mapped_memory_->data());
const uint64 directory_offset = DecodeUint64LittleEndian(
memory_start + mapped_memory_->length() - sizeof(uint64));
if (directory_offset > mapped_memory_->length() - sizeof(uint64)) {
return errors::DataLoss("Corrupted memmapped model file: ", filename,
" Invalid directory offset");
}
MemmappedFileSystemDirectory proto_directory;
if (!ParseProtoUnlimited(
&proto_directory, memory_start + directory_offset,
mapped_memory_->length() - directory_offset - sizeof(uint64))) {
return errors::DataLoss("Corrupted memmapped model file: ", filename,
" Can't parse its internal directory");
}
uint64 prev_element_offset = directory_offset;
for (auto element_iter = proto_directory.element().rbegin();
element_iter != proto_directory.element().rend(); ++element_iter) {
if (element_iter->offset() >= prev_element_offset) {
return errors::DataLoss("Corrupted memmapped model file: ", filename,
" Invalid offset of internal component");
}
if (!directory_
.insert(std::make_pair(
element_iter->name(),
FileRegion(element_iter->offset(), element_iter->length())))
.second) {
return errors::DataLoss("Corrupted memmapped model file: ", filename,
" Duplicate name of internal component ",
element_iter->name());
}
prev_element_offset = element_iter->offset();
}
return absl::OkStatus();
}
bool MemmappedFileSystem::IsMemmappedPackageFilename(const string& filename) {
return absl::StartsWith(filename, kMemmappedPackagePrefix);
}
namespace {
bool IsValidRegionChar(char c) {
return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') ||
(c >= '0' && c <= '9') || c == '_' || c == '.';
}
}
bool MemmappedFileSystem::IsWellFormedMemmappedPackageFilename(
const string& filename) {
if (!IsMemmappedPackageFilename(filename)) {
return false;
}
for (char c :
filename.substr(strlen(kMemmappedPackagePrefix),
filename.length() - strlen(kMemmappedPackagePrefix))) {
if (!IsValidRegionChar(c)) {
return false;
}
}
return true;
}
MemmappedEnv::MemmappedEnv(Env* env) : EnvWrapper(env) {}
Status MemmappedEnv::GetFileSystemForFile(const string& fname,
FileSystem** result) {
if (MemmappedFileSystem::IsMemmappedPackageFilename(fname)) {
if (!memmapped_file_system_) {
return errors::FailedPrecondition(
"MemmappedEnv is not initialized from a file.");
}
*result = memmapped_file_system_.get();
return absl::OkStatus();
}
return EnvWrapper::GetFileSystemForFile(fname, result);
}
Status MemmappedEnv::GetRegisteredFileSystemSchemes(
std::vector<string>* schemes) {
const auto status = EnvWrapper::GetRegisteredFileSystemSchemes(schemes);
if (status.ok()) {
schemes->emplace_back(MemmappedFileSystem::kMemmappedPackagePrefix);
}
return status;
}
Status MemmappedEnv::InitializeFromFile(const string& package_filename) {
std::unique_ptr<MemmappedFileSystem> file_system_ptr(new MemmappedFileSystem);
const auto status =
file_system_ptr->InitializeFromFile(target(), package_filename);
if (status.ok()) {
memmapped_file_system_ = std::move(file_system_ptr);
}
return status;
}
} | #include "tensorflow/core/util/memmapped_file_system.h"
#include <memory>
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/memmapped_file_system_writer.h"
#ifdef PLATFORM_WINDOWS
#undef DeleteFile
#endif
namespace tensorflow {
namespace {
constexpr char kTensor1FileName[] = "memmapped_package://t1";
constexpr char kTensor2FileName[] = "memmapped_package://t2";
constexpr char kProtoFileName[] = "memmapped_package://b";
constexpr int kTestGraphDefVersion = 666;
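// Writes a small package containing two tensors and a GraphDef.  When
// `corrupted` is true the writer is not flushed/closed, which leaves the
// file without a valid directory.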
Status CreateMemmappedFileSystemFile(const string& filename, bool corrupted,
Tensor* test_tensor) {
Env* env = Env::Default();
MemmappedFileSystemWriter writer;
TF_RETURN_IF_ERROR(writer.InitializeToFile(env, filename));
test::FillFn<float>(test_tensor,
[](int i) { return static_cast<float>(i * i); });
TF_RETURN_IF_ERROR(writer.SaveTensor(*test_tensor, kTensor1FileName));
GraphDef graph_def;
graph_def.mutable_versions()->set_producer(kTestGraphDefVersion);
graph_def.mutable_versions()->set_min_consumer(kTestGraphDefVersion);
TF_RETURN_IF_ERROR(writer.SaveProtobuf(graph_def, kProtoFileName));
test::FillFn<float>(test_tensor,
[](int i) { return static_cast<float>(i) * i * i; });
TF_RETURN_IF_ERROR(writer.SaveTensor(*test_tensor, kTensor2FileName));
if (!corrupted) {
TF_RETURN_IF_ERROR(writer.FlushAndClose());
}
return absl::OkStatus();
}
TEST(MemmappedFileSystemTest, SimpleTest) {
const TensorShape test_tensor_shape = {10, 200};
Tensor test_tensor(DT_FLOAT, test_tensor_shape);
const string dir = testing::TmpDir();
const string filename = io::JoinPath(dir, "memmapped_env_test");
TF_ASSERT_OK(CreateMemmappedFileSystemFile(filename, false, &test_tensor));
MemmappedEnv memmapped_env(Env::Default());
TF_ASSERT_OK(memmapped_env.InitializeFromFile(filename));
GraphDef test_graph_def;
TF_EXPECT_OK(
ReadBinaryProto(&memmapped_env, kProtoFileName, &test_graph_def));
EXPECT_EQ(kTestGraphDefVersion, test_graph_def.versions().producer());
EXPECT_EQ(kTestGraphDefVersion, test_graph_def.versions().min_consumer());
std::unique_ptr<ReadOnlyMemoryRegion> memory_region;
TF_ASSERT_OK(memmapped_env.NewReadOnlyMemoryRegionFromFile(kTensor2FileName,
&memory_region));
ASSERT_GE(memory_region->length(), test_tensor.TotalBytes());
EXPECT_EQ(test_tensor.tensor_data(),
StringPiece(static_cast<const char*>(memory_region->data()),
test_tensor.TotalBytes()));
uint64 file_size = 0;
TF_ASSERT_OK(memmapped_env.GetFileSize(kTensor2FileName, &file_size));
EXPECT_EQ(test_tensor.TotalBytes(), file_size);
FileStatistics stat;
TF_ASSERT_OK(memmapped_env.Stat(kTensor2FileName, &stat));
EXPECT_EQ(test_tensor.TotalBytes(), stat.length);
EXPECT_EQ(
error::NOT_FOUND,
memmapped_env.NewReadOnlyMemoryRegionFromFile("bla-bla", &memory_region)
.code());
TF_EXPECT_OK(memmapped_env.FileExists(kTensor2FileName));
EXPECT_EQ(error::Code::NOT_FOUND,
memmapped_env.FileExists("bla-bla-bla").code());
}
TEST(MemmappedFileSystemTest, NotInitialized) {
MemmappedEnv memmapped_env(Env::Default());
std::unique_ptr<ReadOnlyMemoryRegion> memory_region;
EXPECT_EQ(
error::FAILED_PRECONDITION,
memmapped_env
.NewReadOnlyMemoryRegionFromFile(kTensor1FileName, &memory_region)
.code());
std::unique_ptr<RandomAccessFile> file;
EXPECT_EQ(error::FAILED_PRECONDITION,
memmapped_env.NewRandomAccessFile(kProtoFileName, &file).code());
}
TEST(MemmappedFileSystemTest, Corrupted) {
const TensorShape test_tensor_shape = {100, 200};
Tensor test_tensor(DT_FLOAT, test_tensor_shape);
const string dir = testing::TmpDir();
const string filename = io::JoinPath(dir, "memmapped_env_corrupted_test");
TF_ASSERT_OK(CreateMemmappedFileSystemFile(filename, true, &test_tensor));
MemmappedFileSystem memmapped_env;
ASSERT_NE(memmapped_env.InitializeFromFile(Env::Default(), filename),
absl::OkStatus());
}
TEST(MemmappedFileSystemTest, ProxyToDefault) {
MemmappedEnv memmapped_env(Env::Default());
const string dir = testing::TmpDir();
const string filename = io::JoinPath(dir, "test_file");
std::unique_ptr<WritableFile> writable_file_temp;
TF_ASSERT_OK(memmapped_env.NewAppendableFile(filename, &writable_file_temp));
const auto adh = [&memmapped_env, &filename](WritableFile* f) {
delete f;
TF_CHECK_OK(memmapped_env.DeleteFile(filename));
};
std::unique_ptr<WritableFile, decltype(adh)> writable_file(
writable_file_temp.release(), adh);
const string test_string = "bla-bla-bla";
TF_ASSERT_OK(writable_file->Append(test_string));
TF_ASSERT_OK(writable_file->Close());
uint64 file_length = 0;
TF_EXPECT_OK(memmapped_env.GetFileSize(filename, &file_length));
EXPECT_EQ(test_string.length(), file_length);
FileStatistics stat;
TF_EXPECT_OK(memmapped_env.Stat(filename, &stat));
EXPECT_EQ(test_string.length(), stat.length);
std::unique_ptr<RandomAccessFile> random_access_file;
TF_ASSERT_OK(
memmapped_env.NewRandomAccessFile(filename, &random_access_file));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/memmapped_file_system.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/memmapped_file_system_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
746af95a-344b-481c-9b17-a794ea3ed4f6 | cpp | tensorflow/tensorflow | padding | tensorflow/lite/delegates/gpu/common/tasks/padding.cc | tensorflow/lite/delegates/gpu/cl/kernels/padding_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/padding.h"
#include <string>
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
namespace tflite {
namespace gpu {
namespace {
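// Emits the GPU kernel source for the padding operation.  Each work item
// computes one destination (X, Y, slice) element and maps it back to a
// source coordinate; out-of-range coordinates yield the constant value, or,
// for REFLECT padding, a mirrored source coordinate.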
std::string GetPaddingCode(const OperationDef& op_def,
const PadAttributes& attr, GPUOperation* op) {
op->AddSrcTensor("src_tensor", op_def.src_tensors[0]);
op->AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
op->args_.AddInt("prepended_x", attr.prepended.w);
op->args_.AddInt("prepended_y", attr.prepended.h);
op->args_.AddInt("prepended_z", attr.prepended.c);
op->args_.AddInt("prepended_w", attr.prepended.b);
const std::string dst_batch =
op_def.dst_tensors[0].HasAxis(Axis::BATCH) ? "B" : "0";
std::string c;
const std::string channels[] = {".x", ".y", ".z", ".w"};
if (attr.type == PaddingContentType::REFLECT) {
c += "int reflect_coord(int x, int size) {\n";
c += " int t = abs(x) - size + 1;\n";
c += " return size - 1 - abs(t);\n";
c += "}\n\n";
}
c += "MAIN_FUNCTION($0) {\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.src_tensor.SetBatchRef(B);\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
}
c += " int Y = GLOBAL_ID_1;\n";
c += " int Z = GLOBAL_ID_2;\n";
c += " if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() || "
"Z >= args.dst_tensor.Slices()) { \n";
c += " return; \n";
c += " } \n";
c += " args.src_tensor::type result = (" +
ToCLDataType(op_def.src_tensors[0].GetDataType(), 4) +
")(" + std::to_string(attr.constant_values) + ");\n";
c += " int s_x = X - args.prepended_x;\n";
c += " int s_y = Y - args.prepended_y;\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int s_b = " + dst_batch + " - args.prepended_w;\n";
c += " args.src_tensor.SetBatchRef(s_b);\n";
}
if (attr.type == PaddingContentType::REFLECT) {
c += " s_x = reflect_coord(s_x, args.src_tensor.Width());\n";
c += " s_y = reflect_coord(s_y, args.src_tensor.Height());\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int s_b = reflect_coord(s_b, args.src_tensor.Batch());\n";
}
if (attr.prepended.c == 0 && attr.appended.c == 0) {
c += " result = args.src_tensor.Read(s_x, s_y, Z);\n";
} else {
c += " int start_channel = Z * 4;\n";
for (int i = 0; i < 4; ++i) {
const auto& s = channels[i];
c += " {\n";
c += " int channel = start_channel + " + std::to_string(i) + ";\n";
c += " int s_z = channel - args.prepended_z;\n";
c += " s_z = clamp(reflect_coord(s_z, args.src_tensor.Channels()), "
"0, "
"args.src_tensor.Channels() - "
"1);\n";
c += " args.src_tensor.ReadPerChannel(result" + s +
", s_x, s_y, s_z);\n";
c += " }\n";
}
}
} else {
c += " bool inside_x = s_x >= 0 && s_x < args.src_tensor.Width();\n";
c += " bool inside_y = s_y >= 0 && s_y < args.src_tensor.Height();\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " inside_y = inside_y && (s_b >= 0 && s_b < "
"args.src_tensor.Batch());\n";
}
c += " if (inside_x && inside_y) {\n";
if (attr.prepended.c == 0 && attr.appended.c == 0) {
c += " result = args.src_tensor.Read(s_x, s_y, Z);\n";
} else if (attr.prepended.c % 4 == 0 &&
attr.prepended.b == attr.appended.b) {
c += " int s_z = Z - args.prepended_z / 4;\n";
c += " if (s_z >= 0 && s_z < args.src_tensor.Slices()) {\n";
c += " result = args.src_tensor.Read(s_x, s_y, s_z);\n";
c += " }\n";
} else {
c += " int start_channel = Z * 4;\n";
for (int i = 0; i < 4; ++i) {
const auto& s = channels[i];
c += " {\n";
c += " int channel = start_channel + " + std::to_string(i) + ";\n";
c += " int s_z = channel - args.prepended_z;\n";
c += " if (s_z >= 0 && s_z < args.src_tensor.Channels()) {\n";
c += " args.src_tensor.ReadPerChannel(result" + s +
", s_x, s_y, s_z);\n";
c += " }\n";
c += " }\n";
}
}
c += " }\n";
}
c += " args.dst_tensor.Write(result, X, Y, Z);\n";
c += "}\n";
return c;
}
}
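// Wraps the generated padding kernel in a GPUOperation; the dispatch grid maps
// width (and batch) to X, height to Y and slices to Z.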
GPUOperation CreatePadding(const OperationDef& definition,
const PadAttributes& attr) {
GPUOperation op(definition);
op.code_ = GetPaddingCode(definition, attr, &op);
op.tensor_to_grid_ = TensorToGrid::kWBToX_HDToY_SToZ;
return op;
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/padding_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
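// Each case below delegates to the shared implementation in
// padding_test_util, executed against the OpenCL environment provided by
// OpenCLOperationTest.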
TEST_F(OpenCLOperationTest, PaddingAppendWidth) {
auto status = PaddingAppendWidthTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, PaddingAppendWidthConstValues) {
auto status = PaddingAppendWidthConstValuesTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, PaddingPrependWidth) {
auto status = PaddingPrependWidthTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, PaddingAppendHeight) {
auto status = PaddingAppendHeightTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, PaddingPrependHeight) {
auto status = PaddingPrependHeightTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, PaddingAppendChannels) {
auto status = PaddingAppendChannelsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, PaddingPrependChannels) {
auto status = PaddingPrependChannelsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, PaddingPrependChannelsX4) {
auto status = PaddingPrependChannelsX4Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, PaddingComplex) {
auto status = PaddingComplexTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, PaddingReflectWidth) {
auto status = PaddingReflectWidthTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, PaddingReflectChannels) {
auto status = PaddingReflectChannelsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/padding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/padding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
142edbef-bb7f-49ca-8044-b0c2a9bdfa53 | cpp | google/quiche | spdy_utils | quiche/quic/core/http/spdy_utils.cc | quiche/quic/core/http/spdy_utils_test.cc | #include "quiche/quic/core/http/spdy_utils.h"
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "quiche/http2/core/spdy_protocol.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/common/quiche_text_utils.h"
using quiche::HttpHeaderBlock;
namespace quic {
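// Extracts the content-length value from |headers|. A header folded from
// several entries is stored NUL-separated, so every component must be all
// digits and parse to the same value; otherwise false is returned.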
bool SpdyUtils::ExtractContentLengthFromHeaders(int64_t* content_length,
HttpHeaderBlock* headers) {
auto it = headers->find("content-length");
if (it == headers->end()) {
return false;
} else {
absl::string_view content_length_header = it->second;
std::vector<absl::string_view> values =
absl::StrSplit(content_length_header, '\0');
for (const absl::string_view& value : values) {
uint64_t new_value;
if (!absl::SimpleAtoi(value, &new_value) ||
!quiche::QuicheTextUtils::IsAllDigits(value)) {
QUIC_DLOG(ERROR)
<< "Content length was either unparseable or negative.";
return false;
}
if (*content_length < 0) {
*content_length = new_value;
continue;
}
if (new_value != static_cast<uint64_t>(*content_length)) {
QUIC_DLOG(ERROR)
<< "Parsed content length " << new_value << " is "
<< "inconsistent with previously detected content length "
<< *content_length;
return false;
}
}
return true;
}
}
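// Copies |header_list| into |headers|, rejecting empty or upper-case header
// names and validating any content-length header found along the way.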
bool SpdyUtils::CopyAndValidateHeaders(const QuicHeaderList& header_list,
int64_t* content_length,
HttpHeaderBlock* headers) {
for (const auto& p : header_list) {
const std::string& name = p.first;
if (name.empty()) {
QUIC_DLOG(ERROR) << "Header name must not be empty.";
return false;
}
if (quiche::QuicheTextUtils::ContainsUpperCase(name)) {
QUIC_DLOG(ERROR) << "Malformed header: Header name " << name
<< " contains upper-case characters.";
return false;
}
headers->AppendValueOrAddHeader(name, p.second);
}
if (headers->contains("content-length") &&
!ExtractContentLengthFromHeaders(content_length, headers)) {
return false;
}
QUIC_DVLOG(1) << "Successfully parsed headers: " << headers->DebugString();
return true;
}
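// Copies |header_list| into |trailers|. Empty names and pseudo-headers are
// rejected; the special final-offset key is consumed into |final_byte_offset|
// rather than copied, and is mandatory when |expect_final_byte_offset| is set.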
bool SpdyUtils::CopyAndValidateTrailers(const QuicHeaderList& header_list,
bool expect_final_byte_offset,
size_t* final_byte_offset,
HttpHeaderBlock* trailers) {
bool found_final_byte_offset = false;
for (const auto& p : header_list) {
const std::string& name = p.first;
if (expect_final_byte_offset && !found_final_byte_offset &&
name == kFinalOffsetHeaderKey &&
absl::SimpleAtoi(p.second, final_byte_offset)) {
found_final_byte_offset = true;
continue;
}
if (name.empty() || name[0] == ':') {
QUIC_DLOG(ERROR)
<< "Trailers must not be empty, and must not contain pseudo-"
<< "headers. Found: '" << name << "'";
return false;
}
if (quiche::QuicheTextUtils::ContainsUpperCase(name)) {
QUIC_DLOG(ERROR) << "Malformed header: Header name " << name
<< " contains upper-case characters.";
return false;
}
trailers->AppendValueOrAddHeader(name, p.second);
}
if (expect_final_byte_offset && !found_final_byte_offset) {
QUIC_DLOG(ERROR) << "Required key '" << kFinalOffsetHeaderKey
<< "' not present";
return false;
}
QUIC_DVLOG(1) << "Successfully parsed Trailers: " << trailers->DebugString();
return true;
}
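// Fills the ":method", ":scheme", ":authority" and ":path" pseudo-headers from
// |url|, defaulting the path to "/" when the URL has none. Returns false if
// the URL lacks a scheme separator.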
bool SpdyUtils::PopulateHeaderBlockFromUrl(const std::string url,
HttpHeaderBlock* headers) {
(*headers)[":method"] = "GET";
size_t pos = url.find(":
if (pos == std::string::npos) {
return false;
}
(*headers)[":scheme"] = url.substr(0, pos);
size_t start = pos + 3;
pos = url.find('/', start);
if (pos == std::string::npos) {
(*headers)[":authority"] = url.substr(start);
(*headers)[":path"] = "/";
return true;
}
(*headers)[":authority"] = url.substr(start, pos - start);
(*headers)[":path"] = url.substr(pos);
return true;
}
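// Returns the first supported version whose ALPN matches the Alt-Svc entry's
// protocol id, skipping versions that defer their ALPN to RFCv1; returns
// ParsedQuicVersion::Unsupported() if nothing matches.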
ParsedQuicVersion SpdyUtils::ExtractQuicVersionFromAltSvcEntry(
const spdy::SpdyAltSvcWireFormat::AlternativeService&
alternative_service_entry,
const ParsedQuicVersionVector& supported_versions) {
for (const ParsedQuicVersion& version : supported_versions) {
if (version.AlpnDeferToRFCv1()) {
continue;
}
if (AlpnForVersion(version) == alternative_service_entry.protocol_id) {
return version;
}
}
return ParsedQuicVersion::Unsupported();
}
} | #include "quiche/quic/core/http/spdy_utils.h"
#include <memory>
#include <string>
#include "absl/base/macros.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_test.h"
using quiche::HttpHeaderBlock;
using testing::Pair;
using testing::UnorderedElementsAre;
namespace quic {
namespace test {
namespace {
const bool kExpectFinalByteOffset = true;
const bool kDoNotExpectFinalByteOffset = false;
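// Builds a QuicHeaderList from the given (name, value) pairs and closes the
// header block, roughly mirroring what the header decoder would deliver.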
static std::unique_ptr<QuicHeaderList> FromList(
const QuicHeaderList::ListType& src) {
auto headers = std::make_unique<QuicHeaderList>();
for (const auto& p : src) {
headers->OnHeader(p.first, p.second);
}
headers->OnHeaderBlockEnd(0, 0);
return headers;
}
}
using CopyAndValidateHeaders = QuicTest;
TEST_F(CopyAndValidateHeaders, NormalUsage) {
auto headers = FromList({
{"cookie", " part 1"},
{"cookie", "part 2 "},
{"cookie", "part3"},
{"passed-through", std::string("foo\0baz", 7)},
{"joined", "value 1"},
{"joined", "value 2"},
{"empty", ""},
{"empty-joined", ""},
{"empty-joined", "foo"},
{"empty-joined", ""},
{"empty-joined", ""},
{"cookie", " fin!"}});
int64_t content_length = -1;
HttpHeaderBlock block;
ASSERT_TRUE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
EXPECT_THAT(block,
UnorderedElementsAre(
Pair("cookie", " part 1; part 2 ; part3; fin!"),
Pair("passed-through", absl::string_view("foo\0baz", 7)),
Pair("joined", absl::string_view("value 1\0value 2", 15)),
Pair("empty", ""),
Pair("empty-joined", absl::string_view("\0foo\0\0", 6))));
EXPECT_EQ(-1, content_length);
}
TEST_F(CopyAndValidateHeaders, EmptyName) {
auto headers = FromList({{"foo", "foovalue"}, {"", "barvalue"}, {"baz", ""}});
int64_t content_length = -1;
HttpHeaderBlock block;
ASSERT_FALSE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
}
TEST_F(CopyAndValidateHeaders, UpperCaseName) {
auto headers =
FromList({{"foo", "foovalue"}, {"bar", "barvalue"}, {"bAz", ""}});
int64_t content_length = -1;
HttpHeaderBlock block;
ASSERT_FALSE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
}
TEST_F(CopyAndValidateHeaders, MultipleContentLengths) {
auto headers = FromList({{"content-length", "9"},
{"foo", "foovalue"},
{"content-length", "9"},
{"bar", "barvalue"},
{"baz", ""}});
int64_t content_length = -1;
HttpHeaderBlock block;
ASSERT_TRUE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
EXPECT_THAT(block, UnorderedElementsAre(
Pair("foo", "foovalue"), Pair("bar", "barvalue"),
Pair("content-length", absl::string_view("9\09", 3)),
Pair("baz", "")));
EXPECT_EQ(9, content_length);
}
TEST_F(CopyAndValidateHeaders, InconsistentContentLengths) {
auto headers = FromList({{"content-length", "9"},
{"foo", "foovalue"},
{"content-length", "8"},
{"bar", "barvalue"},
{"baz", ""}});
int64_t content_length = -1;
HttpHeaderBlock block;
ASSERT_FALSE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
}
TEST_F(CopyAndValidateHeaders, LargeContentLength) {
auto headers = FromList({{"content-length", "9000000000"},
{"foo", "foovalue"},
{"bar", "barvalue"},
{"baz", ""}});
int64_t content_length = -1;
HttpHeaderBlock block;
ASSERT_TRUE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
EXPECT_THAT(block,
UnorderedElementsAre(
Pair("foo", "foovalue"), Pair("bar", "barvalue"),
Pair("content-length", absl::string_view("9000000000")),
Pair("baz", "")));
EXPECT_EQ(9000000000, content_length);
}
TEST_F(CopyAndValidateHeaders, NonDigitContentLength) {
auto headers = FromList({{"content-length", "+123"},
{"foo", "foovalue"},
{"bar", "barvalue"},
{"baz", ""}});
int64_t content_length = -1;
HttpHeaderBlock block;
EXPECT_FALSE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
}
TEST_F(CopyAndValidateHeaders, MultipleValues) {
auto headers = FromList({{"foo", "foovalue"},
{"bar", "barvalue"},
{"baz", ""},
{"foo", "boo"},
{"baz", "buzz"}});
int64_t content_length = -1;
HttpHeaderBlock block;
ASSERT_TRUE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
EXPECT_THAT(block, UnorderedElementsAre(
Pair("foo", absl::string_view("foovalue\0boo", 12)),
Pair("bar", "barvalue"),
Pair("baz", absl::string_view("\0buzz", 5))));
EXPECT_EQ(-1, content_length);
}
TEST_F(CopyAndValidateHeaders, MoreThanTwoValues) {
auto headers = FromList({{"set-cookie", "value1"},
{"set-cookie", "value2"},
{"set-cookie", "value3"}});
int64_t content_length = -1;
HttpHeaderBlock block;
ASSERT_TRUE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
EXPECT_THAT(block, UnorderedElementsAre(Pair(
"set-cookie",
absl::string_view("value1\0value2\0value3", 20))));
EXPECT_EQ(-1, content_length);
}
TEST_F(CopyAndValidateHeaders, Cookie) {
auto headers = FromList({{"foo", "foovalue"},
{"bar", "barvalue"},
{"cookie", "value1"},
{"baz", ""}});
int64_t content_length = -1;
HttpHeaderBlock block;
ASSERT_TRUE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
EXPECT_THAT(block, UnorderedElementsAre(
Pair("foo", "foovalue"), Pair("bar", "barvalue"),
Pair("cookie", "value1"), Pair("baz", "")));
EXPECT_EQ(-1, content_length);
}
TEST_F(CopyAndValidateHeaders, MultipleCookies) {
auto headers = FromList({{"foo", "foovalue"},
{"bar", "barvalue"},
{"cookie", "value1"},
{"baz", ""},
{"cookie", "value2"}});
int64_t content_length = -1;
HttpHeaderBlock block;
ASSERT_TRUE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
EXPECT_THAT(block, UnorderedElementsAre(
Pair("foo", "foovalue"), Pair("bar", "barvalue"),
Pair("cookie", "value1; value2"), Pair("baz", "")));
EXPECT_EQ(-1, content_length);
}
using CopyAndValidateTrailers = QuicTest;
TEST_F(CopyAndValidateTrailers, SimplestValidList) {
auto trailers = FromList({{kFinalOffsetHeaderKey, "1234"}});
size_t final_byte_offset = 0;
HttpHeaderBlock block;
EXPECT_TRUE(SpdyUtils::CopyAndValidateTrailers(
*trailers, kExpectFinalByteOffset, &final_byte_offset, &block));
EXPECT_EQ(1234u, final_byte_offset);
}
TEST_F(CopyAndValidateTrailers, EmptyTrailerListWithFinalByteOffsetExpected) {
QuicHeaderList trailers;
size_t final_byte_offset = 0;
HttpHeaderBlock block;
EXPECT_FALSE(SpdyUtils::CopyAndValidateTrailers(
trailers, kExpectFinalByteOffset, &final_byte_offset, &block));
}
TEST_F(CopyAndValidateTrailers,
EmptyTrailerListWithFinalByteOffsetNotExpected) {
QuicHeaderList trailers;
size_t final_byte_offset = 0;
HttpHeaderBlock block;
EXPECT_TRUE(SpdyUtils::CopyAndValidateTrailers(
trailers, kDoNotExpectFinalByteOffset, &final_byte_offset, &block));
EXPECT_TRUE(block.empty());
}
TEST_F(CopyAndValidateTrailers, FinalByteOffsetExpectedButNotPresent) {
auto trailers = FromList({{"key", "value"}});
size_t final_byte_offset = 0;
HttpHeaderBlock block;
EXPECT_FALSE(SpdyUtils::CopyAndValidateTrailers(
*trailers, kExpectFinalByteOffset, &final_byte_offset, &block));
}
TEST_F(CopyAndValidateTrailers, FinalByteOffsetNotExpectedButPresent) {
auto trailers = FromList({{"key", "value"}, {kFinalOffsetHeaderKey, "1234"}});
size_t final_byte_offset = 0;
HttpHeaderBlock block;
EXPECT_FALSE(SpdyUtils::CopyAndValidateTrailers(
*trailers, kDoNotExpectFinalByteOffset, &final_byte_offset, &block));
}
TEST_F(CopyAndValidateTrailers, FinalByteOffsetNotExpectedAndNotPresent) {
auto trailers = FromList({{"key", "value"}});
size_t final_byte_offset = 0;
HttpHeaderBlock block;
EXPECT_TRUE(SpdyUtils::CopyAndValidateTrailers(
*trailers, kDoNotExpectFinalByteOffset, &final_byte_offset, &block));
EXPECT_THAT(block, UnorderedElementsAre(Pair("key", "value")));
}
TEST_F(CopyAndValidateTrailers, EmptyName) {
auto trailers = FromList({{"", "value"}, {kFinalOffsetHeaderKey, "1234"}});
size_t final_byte_offset = 0;
HttpHeaderBlock block;
EXPECT_FALSE(SpdyUtils::CopyAndValidateTrailers(
*trailers, kExpectFinalByteOffset, &final_byte_offset, &block));
}
TEST_F(CopyAndValidateTrailers, PseudoHeaderInTrailers) {
auto trailers =
FromList({{":pseudo_key", "value"}, {kFinalOffsetHeaderKey, "1234"}});
size_t final_byte_offset = 0;
HttpHeaderBlock block;
EXPECT_FALSE(SpdyUtils::CopyAndValidateTrailers(
*trailers, kExpectFinalByteOffset, &final_byte_offset, &block));
}
TEST_F(CopyAndValidateTrailers, DuplicateTrailers) {
auto trailers = FromList({{"key", "value0"},
{"key", "value1"},
{"key", ""},
{"key", ""},
{"key", "value2"},
{"key", ""},
{kFinalOffsetHeaderKey, "1234"},
{"other_key", "value"},
{"key", "non_contiguous_duplicate"}});
size_t final_byte_offset = 0;
HttpHeaderBlock block;
EXPECT_TRUE(SpdyUtils::CopyAndValidateTrailers(
*trailers, kExpectFinalByteOffset, &final_byte_offset, &block));
EXPECT_THAT(
block,
UnorderedElementsAre(
Pair("key",
absl::string_view(
"value0\0value1\0\0\0value2\0\0non_contiguous_duplicate",
48)),
Pair("other_key", "value")));
}
TEST_F(CopyAndValidateTrailers, DuplicateCookies) {
auto headers = FromList({{"cookie", " part 1"},
{"cookie", "part 2 "},
{"cookie", "part3"},
{"key", "value"},
{kFinalOffsetHeaderKey, "1234"},
{"cookie", " non_contiguous_cookie!"}});
size_t final_byte_offset = 0;
HttpHeaderBlock block;
EXPECT_TRUE(SpdyUtils::CopyAndValidateTrailers(
*headers, kExpectFinalByteOffset, &final_byte_offset, &block));
EXPECT_THAT(
block,
UnorderedElementsAre(
Pair("cookie", " part 1; part 2 ; part3; non_contiguous_cookie!"),
Pair("key", "value")));
}
using PopulateHeaderBlockFromUrl = QuicTest;
TEST_F(PopulateHeaderBlockFromUrl, NormalUsage) {
std::string url = "https:
HttpHeaderBlock headers;
EXPECT_TRUE(SpdyUtils::PopulateHeaderBlockFromUrl(url, &headers));
EXPECT_EQ("https", headers[":scheme"].as_string());
EXPECT_EQ("www.google.com", headers[":authority"].as_string());
EXPECT_EQ("/index.html", headers[":path"].as_string());
}
TEST_F(PopulateHeaderBlockFromUrl, UrlWithNoPath) {
std::string url = "https:
HttpHeaderBlock headers;
EXPECT_TRUE(SpdyUtils::PopulateHeaderBlockFromUrl(url, &headers));
EXPECT_EQ("https", headers[":scheme"].as_string());
EXPECT_EQ("www.google.com", headers[":authority"].as_string());
EXPECT_EQ("/", headers[":path"].as_string());
}
TEST_F(PopulateHeaderBlockFromUrl, Failure) {
HttpHeaderBlock headers;
EXPECT_FALSE(SpdyUtils::PopulateHeaderBlockFromUrl("/", &headers));
EXPECT_FALSE(SpdyUtils::PopulateHeaderBlockFromUrl("/index.html", &headers));
EXPECT_FALSE(
SpdyUtils::PopulateHeaderBlockFromUrl("www.google.com/", &headers));
}
using ExtractQuicVersionFromAltSvcEntry = QuicTest;
TEST_F(ExtractQuicVersionFromAltSvcEntry, SupportedVersion) {
ParsedQuicVersionVector supported_versions = AllSupportedVersions();
spdy::SpdyAltSvcWireFormat::AlternativeService entry;
for (const ParsedQuicVersion& version : supported_versions) {
entry.protocol_id = AlpnForVersion(version);
ParsedQuicVersion expected_version = version;
if (entry.protocol_id == AlpnForVersion(ParsedQuicVersion::RFCv1()) &&
version != ParsedQuicVersion::RFCv1()) {
expected_version = ParsedQuicVersion::RFCv1();
}
EXPECT_EQ(expected_version, SpdyUtils::ExtractQuicVersionFromAltSvcEntry(
entry, supported_versions))
<< "version: " << version;
}
}
TEST_F(ExtractQuicVersionFromAltSvcEntry, UnsupportedVersion) {
spdy::SpdyAltSvcWireFormat::AlternativeService entry;
entry.protocol_id = "quic";
EXPECT_EQ(ParsedQuicVersion::Unsupported(),
SpdyUtils::ExtractQuicVersionFromAltSvcEntry(
entry, AllSupportedVersions()));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/spdy_utils.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/spdy_utils_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
5e8694bc-5801-48fa-903c-52aec37caa65 | cpp | google/quiche | qbone_tunnel_silo | quiche/quic/qbone/bonnet/qbone_tunnel_silo.cc | quiche/quic/qbone/bonnet/qbone_tunnel_silo_test.cc | #include "quiche/quic/qbone/bonnet/qbone_tunnel_silo.h"
namespace quic {
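// Runs the tunnel's event loop until ShouldRun() says to stop, then
// disconnects and logs the final tunnel state.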
void QboneTunnelSilo::Run() {
while (ShouldRun()) {
tunnel_->WaitForEvents();
}
QUIC_LOG(INFO) << "Tunnel has disconnected in state: "
<< tunnel_->StateToString(tunnel_->Disconnect());
}
void QboneTunnelSilo::Quit() {
QUIC_LOG(INFO) << "Quit called on QboneTunnelSilo";
quitting_.Notify();
tunnel_->Wake();
}
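// Keeps the loop alive until Quit() has been requested, or until start-up has
// completed when the silo was created with only_setup_tun_ set.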
bool QboneTunnelSilo::ShouldRun() {
bool post_init_shutdown_ready =
only_setup_tun_ &&
tunnel_->state() == quic::QboneTunnelInterface::STARTED;
return !quitting_.HasBeenNotified() && !post_init_shutdown_ready;
}
} | #include "quiche/quic/qbone/bonnet/qbone_tunnel_silo.h"
#include "absl/synchronization/notification.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/qbone/bonnet/mock_qbone_tunnel.h"
namespace quic {
namespace {
using ::testing::Eq;
using ::testing::Invoke;
using ::testing::Return;
TEST(QboneTunnelSiloTest, SiloRunsEventLoop) {
MockQboneTunnel mock_tunnel;
absl::Notification event_loop_run;
EXPECT_CALL(mock_tunnel, WaitForEvents)
.WillRepeatedly(Invoke([&event_loop_run]() {
if (!event_loop_run.HasBeenNotified()) {
event_loop_run.Notify();
}
return false;
}));
QboneTunnelSilo silo(&mock_tunnel, false);
silo.Start();
event_loop_run.WaitForNotification();
absl::Notification client_disconnected;
EXPECT_CALL(mock_tunnel, Disconnect)
.WillOnce(Invoke([&client_disconnected]() {
client_disconnected.Notify();
return QboneTunnelInterface::ENDED;
}));
silo.Quit();
client_disconnected.WaitForNotification();
silo.Join();
}
TEST(QboneTunnelSiloTest, SiloCanShutDownAfterInit) {
MockQboneTunnel mock_tunnel;
int iteration_count = 0;
EXPECT_CALL(mock_tunnel, WaitForEvents)
.WillRepeatedly(Invoke([&iteration_count]() {
iteration_count++;
return false;
}));
EXPECT_CALL(mock_tunnel, state)
.WillOnce(Return(QboneTunnelInterface::START_REQUESTED))
.WillOnce(Return(QboneTunnelInterface::STARTED));
absl::Notification client_disconnected;
EXPECT_CALL(mock_tunnel, Disconnect)
.WillOnce(Invoke([&client_disconnected]() {
client_disconnected.Notify();
return QboneTunnelInterface::ENDED;
}));
QboneTunnelSilo silo(&mock_tunnel, true);
silo.Start();
client_disconnected.WaitForNotification();
silo.Join();
EXPECT_THAT(iteration_count, Eq(1));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/bonnet/qbone_tunnel_silo.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/bonnet/qbone_tunnel_silo_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
e1c22087-66df-40a5-a4bc-a00b88a25171 | cpp | google/cel-cpp | field_mask | extensions/protobuf/internal/field_mask.cc | extensions/protobuf/internal/field_mask_test.cc | #include "extensions/protobuf/internal/field_mask.h"
#include <string>
#include "google/protobuf/field_mask.pb.h"
#include "absl/base/optimization.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "common/json.h"
#include "extensions/protobuf/internal/field_mask_lite.h"
#include "extensions/protobuf/internal/is_generated_message.h"
#include "extensions/protobuf/internal/is_message_lite.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
#include "google/protobuf/reflection.h"
namespace cel::extensions::protobuf_internal {
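// Converts a google.protobuf.FieldMask message to its JSON form (the paths
// joined by commas). Generated messages take the generated-code fast path;
// everything else is read through reflection, with the descriptor checked
// against the expected shape of the well-known type.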
absl::StatusOr<JsonString> DynamicFieldMaskProtoToJsonString(
const google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.FieldMask");
const auto* desc = message.GetDescriptor();
if (ABSL_PREDICT_FALSE(desc == nullptr)) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " missing descriptor"));
}
if constexpr (NotMessageLite<google::protobuf::FieldMask>) {
if (IsGeneratedMessage(message)) {
return GeneratedFieldMaskProtoToJsonString(
google::protobuf::DownCastMessage<google::protobuf::FieldMask>(message));
}
}
const auto* reflection = message.GetReflection();
if (ABSL_PREDICT_FALSE(reflection == nullptr)) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " missing reflection"));
}
const auto* paths_field =
desc->FindFieldByNumber(google::protobuf::FieldMask::kPathsFieldNumber);
if (ABSL_PREDICT_FALSE(paths_field == nullptr)) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " missing paths field descriptor"));
}
if (ABSL_PREDICT_FALSE(paths_field->cpp_type() !=
google::protobuf::FieldDescriptor::CPPTYPE_STRING)) {
return absl::InternalError(absl::StrCat(
message.GetTypeName(),
" has unexpected paths field type: ", paths_field->cpp_type_name()));
}
if (ABSL_PREDICT_FALSE(!paths_field->is_repeated())) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(),
" has unexpected paths field cardinality: UNKNOWN"));
}
return JsonString(absl::StrJoin(
reflection->GetRepeatedFieldRef<std::string>(message, paths_field), ","));
}
} | #include "extensions/protobuf/internal/field_mask.h"
#include <memory>
#include "google/protobuf/field_mask.pb.h"
#include "google/protobuf/descriptor.pb.h"
#include "absl/memory/memory.h"
#include "extensions/protobuf/internal/field_mask_lite.h"
#include "internal/testing.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/descriptor_database.h"
#include "google/protobuf/dynamic_message.h"
namespace cel::extensions::protobuf_internal {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::testing::Eq;
TEST(FieldMask, GeneratedFromProto) {
google::protobuf::FieldMask proto;
proto.add_paths("foo");
proto.add_paths("bar");
EXPECT_THAT(GeneratedFieldMaskProtoToJsonString(proto),
IsOkAndHolds(Eq(JsonString("foo,bar"))));
}
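// Exercises the reflection-based path by building the FieldMask with a
// DynamicMessageFactory backed by a standalone descriptor pool, so the
// generated-message fast path cannot be taken.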
TEST(FieldMask, CustomFromProto) {
google::protobuf::SimpleDescriptorDatabase database;
{
google::protobuf::FileDescriptorProto fd;
google::protobuf::FieldMask::descriptor()->file()->CopyTo(&fd);
ASSERT_TRUE(database.Add(fd));
}
google::protobuf::DescriptorPool pool(&database);
pool.AllowUnknownDependencies();
google::protobuf::DynamicMessageFactory factory(&pool);
factory.SetDelegateToGeneratedFactory(false);
std::unique_ptr<google::protobuf::Message> proto = absl::WrapUnique(
factory
.GetPrototype(pool.FindMessageTypeByName("google.protobuf.FieldMask"))
->New());
const auto* descriptor = proto->GetDescriptor();
const auto* reflection = proto->GetReflection();
const auto* paths_field = descriptor->FindFieldByName("paths");
ASSERT_NE(paths_field, nullptr);
reflection->AddString(proto.get(), paths_field, "foo");
reflection->AddString(proto.get(), paths_field, "bar");
EXPECT_THAT(DynamicFieldMaskProtoToJsonString(*proto),
IsOkAndHolds(Eq(JsonString("foo,bar"))));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/protobuf/internal/field_mask.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/protobuf/internal/field_mask_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |