ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---|
f1c72f99-0fb9-453a-a24b-21b943ceb2cf | cpp | tensorflow/tensorflow | dynamic_dimension_simplifier | third_party/xla/xla/service/dynamic_dimension_simplifier.cc | third_party/xla/xla/service/dynamic_dimension_simplifier_test.cc | #include "xla/service/dynamic_dimension_simplifier.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/status_macros.h"
namespace xla {
namespace {
absl::StatusOr<bool> ConcatForwarding(HloInstruction* concat) {
if (concat->opcode() != HloOpcode::kConcatenate) {
return false;
}
bool changed = false;
auto parent = concat->parent();
std::vector<HloInstruction*> new_operands;
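// Splice operands that are themselves concatenates along the same dimension directly into this concat.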
for (HloInstruction* operand : concat->operands()) {
if (operand->opcode() != HloOpcode::kConcatenate ||
operand->concatenate_dimension() != concat->concatenate_dimension()) {
new_operands.push_back(operand);
} else {
changed = true;
for (HloInstruction* operand_operand : operand->operands()) {
new_operands.push_back(operand_operand);
}
}
}
if (changed) {
auto new_concat = parent->AddInstruction(HloInstruction::CreateConcatenate(
concat->shape(), new_operands, concat->concatenate_dimension()));
TF_RETURN_IF_ERROR(parent->ReplaceInstruction(concat, new_concat));
}
return changed;
}
absl::StatusOr<bool> SliceConcatForwarding(HloInstruction* slice) {
if (slice->opcode() != HloOpcode::kSlice) {
return false;
}
auto concat = slice->mutable_operand(0);
if (concat->opcode() != HloOpcode::kConcatenate) {
return false;
}
if (slice->shape().rank() != 1) {
return false;
}
int64_t concat_dim = concat->concatenate_dimension();
std::vector<HloInstruction*> new_operands;
int64_t size_so_far = 0;
int64_t slice_size = slice->shape().dimensions(concat_dim);
if (slice_size != slice->slice_limits(0) - slice->slice_starts(0)) {
return false;
}
if (slice->slice_strides(0) != 1) {
return false;
}
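// The slice must exactly cover one concat operand; walk operands, tracking the running offset.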
for (HloInstruction* operand : concat->operands()) {
if (size_so_far == slice->slice_starts(0) &&
operand->shape().dimensions(0) == slice_size) {
TF_RETURN_IF_ERROR(slice->ReplaceAllUsesWith(operand));
return true;
}
size_so_far += operand->shape().dimensions(concat_dim);
}
return false;
}
absl::StatusOr<bool> ReshapeBroadcastForwarding(HloInstruction* reshape) {
if (reshape->opcode() != HloOpcode::kReshape) {
return false;
}
auto broadcast = reshape->mutable_operand(0);
if (broadcast->opcode() != HloOpcode::kBroadcast) {
return false;
}
if (reshape->shape().rank() != 0) {
return false;
}
if (broadcast->shape().rank() != 1) {
return false;
}
if (broadcast->mutable_operand(0)->shape().rank() != 0) {
return false;
}
TF_RETURN_IF_ERROR(
reshape->ReplaceAllUsesWith(broadcast->mutable_operand(0)));
return true;
}
absl::StatusOr<bool> ReshapeReshapeForwarding(HloInstruction* reshape) {
if (reshape->opcode() != HloOpcode::kReshape) {
return false;
}
auto reshape_2 = reshape->mutable_operand(0);
if (reshape_2->opcode() != HloOpcode::kReshape) {
return false;
}
if (!Shape::Equal()(reshape->shape(), reshape_2->operand(0)->shape())) {
return false;
}
TF_RETURN_IF_ERROR(
reshape->ReplaceAllUsesWith(reshape_2->mutable_operand(0)));
return true;
}
absl::StatusOr<bool> IdentityConvertRemoving(HloInstruction* convert) {
if (convert->opcode() != HloOpcode::kConvert) {
return false;
}
auto operand = convert->mutable_operand(0);
if (Shape::Equal()(convert->shape(), operand->shape())) {
TF_RETURN_IF_ERROR(convert->ReplaceAllUsesWith(operand));
return true;
}
return false;
}
absl::StatusOr<bool> IdentityReshapeRemoving(HloInstruction* reshape) {
if (reshape->opcode() != HloOpcode::kReshape) {
return false;
}
auto operand = reshape->mutable_operand(0);
if (Shape::Equal()(reshape->shape(), operand->shape())) {
TF_RETURN_IF_ERROR(reshape->ReplaceAllUsesWith(operand));
return true;
}
return false;
}
}  // namespace
absl::StatusOr<bool> DynamicDimensionSimplifier::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
2, "DynamicDimensionSimplifier::Run(), before:\n" + module->ToString());
bool changed = false;
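// Each rewrite below runs as its own pass over all non-fusion computations.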
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
for (auto* inst : comp->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(bool local_changed, ConcatForwarding(inst));
changed |= local_changed;
}
}
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
for (auto* inst : comp->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(bool local_changed, SliceConcatForwarding(inst));
changed |= local_changed;
}
}
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
for (auto* inst : comp->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(bool local_changed, ReshapeBroadcastForwarding(inst));
changed |= local_changed;
}
}
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
for (auto* inst : comp->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(bool local_changed, ReshapeReshapeForwarding(inst));
changed |= local_changed;
}
}
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
for (auto* inst : comp->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(bool local_changed, IdentityConvertRemoving(inst));
changed |= local_changed;
}
}
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
for (auto* inst : comp->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(bool local_changed, IdentityReshapeRemoving(inst));
changed |= local_changed;
}
}
XLA_VLOG_LINES(
2, "DynamicDimensionSimplifier::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/dynamic_dimension_simplifier.h"
#include <memory>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/shape_inference.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
namespace m = match;
class DynamicDimensionSimplifierTest : public HloTestBase {};
TEST_F(DynamicDimensionSimplifierTest, ForwardConcat) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[1] parameter(0)
p1 = s32[1] parameter(1)
p2 = s32[1] parameter(2)
concat1 = s32[2] concatenate(p0, p1), dimensions={0}
ROOT concat2 = s32[3] concatenate(concat1, p2), dimensions={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_TRUE(simplifier.Run(m.get()).value());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Concatenate(m::Parameter(0), m::Parameter(1),
m::Parameter(2))));
}
TEST_F(DynamicDimensionSimplifierTest, DoNotForwardConcatMultipleDims) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[1, 1] parameter(0)
p1 = s32[1, 1] parameter(1)
p2 = s32[2, 1] parameter(2)
concat1 = s32[2, 1] concatenate(p0, p1), dimensions={0}
ROOT concat2 = s32[2, 2] concatenate(concat1, p2), dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_FALSE(simplifier.Run(m.get()).value());
}
TEST_F(DynamicDimensionSimplifierTest, ForwardConcatSlice) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[1] parameter(0)
p1 = s32[1] parameter(1)
p2 = s32[1] parameter(2)
concat = s32[3] concatenate(p0, p1, p2), dimensions={0}
ROOT slice = s32[1] slice(concat), slice={[1:2]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_TRUE(simplifier.Run(m.get()).value());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Parameter(1)));
}
TEST_F(DynamicDimensionSimplifierTest, DoNotForwardConcatSliceSizeMismatch) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[1] parameter(0)
p1 = s32[1] parameter(1)
p2 = s32[1] parameter(2)
concat = s32[3] concatenate(p0, p1, p2), dimensions={0}
ROOT slice = s32[2] slice(concat), slice={[1:3]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_FALSE(simplifier.Run(m.get()).value());
}
TEST_F(DynamicDimensionSimplifierTest, DoNotForwardConcatSliceStrided) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[1] parameter(0)
p1 = s32[1] parameter(1)
p2 = s32[1] parameter(2)
concat = s32[3] concatenate(p0, p1, p2), dimensions={0}
ROOT slice = s32[1] slice(concat), slice={[1:2:2]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_FALSE(simplifier.Run(m.get()).value());
}
TEST_F(DynamicDimensionSimplifierTest, BroadcastReshapeForwarding) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[] parameter(0)
broadcast = s32[1] broadcast(p0), dimensions={}
ROOT reshape = s32[] reshape(broadcast)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_TRUE(simplifier.Run(m.get()).value());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Parameter(0)));
}
TEST_F(DynamicDimensionSimplifierTest, ReshapeReshapeForwarding) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[] parameter(0)
reshape = s32[1] reshape(p0)
ROOT reshape2 = s32[] reshape(reshape)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_TRUE(simplifier.Run(m.get()).value());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Parameter(0)));
}
TEST_F(DynamicDimensionSimplifierTest,
DoNotReshapeReshapeForwardingShapeMismatch) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[1, 1] parameter(0)
reshape = s32[1] reshape(p0)
ROOT reshape2 = s32[] reshape(reshape)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_FALSE(simplifier.Run(m.get()).value());
}
TEST_F(DynamicDimensionSimplifierTest, IdConvertRemoving) {
const char* kModuleStr = R"(
HloModule m
test {
p0 = s32[1] parameter(0)
ROOT reshape2 = s32[1] convert(p0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
DynamicDimensionSimplifier simplifier;
ASSERT_TRUE(simplifier.Run(m.get()).value());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Parameter(0)));
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_dimension_simplifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_dimension_simplifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a1fba7d6-3851-4dfe-a221-936f5f6a1256 | cpp | tensorflow/tensorflow | profiler | tensorflow/lite/profiling/telemetry/profiler.cc | tensorflow/lite/profiling/telemetry/profiler_test.cc | #include "tensorflow/lite/profiling/telemetry/profiler.h"
#include <cstdint>
#include "tensorflow/lite/core/api/profiler.h"
namespace tflite::telemetry {
void TelemetryProfiler::AddEvent(const char* tag, EventType event_type,
uint64_t metric, int64_t event_metadata1,
int64_t event_metadata2) {
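// For telemetry events, metric carries the status code and event_metadata1 == -1 marks a non-op event; for op-invoke events, metric is the elapsed time.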
switch (event_type) {
case EventType::TELEMETRY_EVENT:
case EventType::TELEMETRY_DELEGATE_EVENT: {
if (event_metadata1 == -1) {
ReportTelemetryEvent(tag, TelemetryStatusCode(metric));
} else {
ReportTelemetryOpEvent(tag, event_metadata1, event_metadata2,
TelemetryStatusCode(metric));
}
break;
}
case EventType::OPERATOR_INVOKE_EVENT:
case EventType::DELEGATE_OPERATOR_INVOKE_EVENT:
case EventType::DELEGATE_PROFILED_OPERATOR_INVOKE_EVENT: {
ReportOpInvokeEvent(tag, metric, event_metadata1, event_metadata2);
break;
}
default:
return;
}
}
void TelemetryProfiler::AddEventWithData(const char* tag, EventType event_type,
const void* data) {
switch (event_type) {
case EventType::TELEMETRY_REPORT_SETTINGS:
case EventType::TELEMETRY_DELEGATE_REPORT_SETTINGS: {
auto* settings = reinterpret_cast<const TfLiteTelemetrySettings*>(data);
if (settings) {
ReportSettings(tag, settings);
}
break;
}
default:
return;
}
}
uint32_t TelemetryProfiler::BeginEvent(const char* tag, EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) {
switch (event_type) {
case EventType::OPERATOR_INVOKE_EVENT:
case EventType::DELEGATE_OPERATOR_INVOKE_EVENT:
case EventType::DELEGATE_PROFILED_OPERATOR_INVOKE_EVENT: {
return ReportBeginOpInvokeEvent(tag, event_metadata1, event_metadata2);
}
default:
return UINT32_MAX;
}
}
void TelemetryProfiler::EndEvent(uint32_t event_handle) {
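// UINT32_MAX is the sentinel BeginEvent returns for event types it does not track.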
if (event_handle == UINT32_MAX) return;
ReportEndOpInvokeEvent(event_handle);
}
class TfLiteTelemetryProfiler : public TelemetryProfiler {
public:
explicit TfLiteTelemetryProfiler(TfLiteTelemetryProfilerStruct* profiler)
: profiler_(profiler) {}
void ReportTelemetryEvent(const char* event_name,
TelemetryStatusCode status) override;
void ReportTelemetryOpEvent(const char* event_name, int64_t op_idx,
int64_t subgraph_idx,
TelemetryStatusCode status) override;
void ReportSettings(const char* setting_name,
const TfLiteTelemetrySettings* settings) override;
uint32_t ReportBeginOpInvokeEvent(const char* op_name, int64_t op_idx,
int64_t subgraph_idx) override;
void ReportEndOpInvokeEvent(uint32_t event_handle) override;
void ReportOpInvokeEvent(const char* op_name, uint64_t elapsed_time,
int64_t op_idx, int64_t subgraph_idx) override;
private:
TfLiteTelemetryProfilerStruct* profiler_ = nullptr;
};
void TfLiteTelemetryProfiler::ReportTelemetryEvent(const char* event_name,
TelemetryStatusCode status) {
profiler_->ReportTelemetryEvent(profiler_, event_name, status.code());
}
void TfLiteTelemetryProfiler::ReportTelemetryOpEvent(
const char* event_name, int64_t op_idx, int64_t subgraph_idx,
TelemetryStatusCode status) {
profiler_->ReportTelemetryOpEvent(profiler_, event_name, op_idx, subgraph_idx,
status.code());
}
void TfLiteTelemetryProfiler::ReportSettings(
const char* setting_name, const TfLiteTelemetrySettings* settings) {
profiler_->ReportSettings(profiler_, setting_name, settings);
}
uint32_t TfLiteTelemetryProfiler::ReportBeginOpInvokeEvent(
const char* op_name, int64_t op_idx, int64_t subgraph_idx) {
return profiler_->ReportBeginOpInvokeEvent(profiler_, op_name, op_idx,
subgraph_idx);
}
void TfLiteTelemetryProfiler::ReportEndOpInvokeEvent(uint32_t event_handle) {
profiler_->ReportEndOpInvokeEvent(profiler_, event_handle);
}
void TfLiteTelemetryProfiler::ReportOpInvokeEvent(const char* op_name,
uint64_t elapsed_time,
int64_t op_idx,
int64_t subgraph_idx) {
profiler_->ReportOpInvokeEvent(profiler_, op_name, elapsed_time, op_idx,
subgraph_idx);
}
TelemetryProfiler* MakeTfLiteTelemetryProfiler(
TfLiteTelemetryProfilerStruct* profiler) {
return new TfLiteTelemetryProfiler(profiler);
}
} | #include "tensorflow/lite/profiling/telemetry/profiler.h"
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/profiling/telemetry/c/telemetry_setting.h"
#include "tensorflow/lite/profiling/telemetry/telemetry_status.h"
namespace tflite::telemetry {
namespace {
constexpr char kEventName[] = "event_name";
constexpr char kSettingName[] = "setting_name";
class MockTelemtryProfiler : public TelemetryProfiler {
public:
MOCK_METHOD(void, ReportTelemetryEvent,
(const char* event_name, TelemetryStatusCode status), (override));
MOCK_METHOD(void, ReportTelemetryOpEvent,
(const char* event_name, int64_t op_idx, int64_t subgraph_idx,
TelemetryStatusCode status),
(override));
MOCK_METHOD(void, ReportSettings,
(const char* setting_name,
const TfLiteTelemetrySettings* settings),
(override));
MOCK_METHOD(uint32_t, ReportBeginOpInvokeEvent,
(const char* op_name, int64_t op_idx, int64_t subgraph_idx),
(override));
MOCK_METHOD(void, ReportEndOpInvokeEvent, (uint32_t event_handle),
(override));
MOCK_METHOD(void, ReportOpInvokeEvent,
(const char* op_name, uint64_t elapsed_time, int64_t op_idx,
int64_t subgraph_idx),
(override));
};
class TelemetryStructTest : public ::testing::Test {
protected:
TelemetryStructTest() {
context_.profiler = &profiler_;
profiler_struct_.data = &mock_profiler_;
profiler_struct_.ReportTelemetryEvent =
[](struct TfLiteTelemetryProfilerStruct* profiler,
const char* event_name, uint64_t status) {
static_cast<MockTelemtryProfiler*>(profiler->data)
->ReportTelemetryEvent(
event_name, tflite::telemetry::TelemetryStatusCode(status));
};
profiler_struct_.ReportTelemetryOpEvent =
[](struct TfLiteTelemetryProfilerStruct* profiler,
const char* event_name, int64_t op_idx, int64_t subgraph_idx,
uint64_t status) {
static_cast<MockTelemtryProfiler*>(profiler->data)
->ReportTelemetryOpEvent(
event_name, op_idx, subgraph_idx,
tflite::telemetry::TelemetryStatusCode(status));
};
profiler_struct_.ReportSettings =
[](struct TfLiteTelemetryProfilerStruct* profiler,
const char* setting_name, const TfLiteTelemetrySettings* settings) {
static_cast<MockTelemtryProfiler*>(profiler->data)
->ReportSettings(setting_name, settings);
};
profiler_struct_.ReportBeginOpInvokeEvent =
[](struct TfLiteTelemetryProfilerStruct* profiler, const char* op_name,
int64_t op_idx, int64_t subgraph_idx) -> uint32_t {
return static_cast<MockTelemtryProfiler*>(profiler->data)
->ReportBeginOpInvokeEvent(op_name, op_idx, subgraph_idx);
};
profiler_struct_.ReportEndOpInvokeEvent =
[](struct TfLiteTelemetryProfilerStruct* profiler,
uint32_t event_handle) {
return static_cast<MockTelemtryProfiler*>(profiler->data)
->ReportEndOpInvokeEvent(event_handle);
};
profiler_struct_.ReportOpInvokeEvent =
[](struct TfLiteTelemetryProfilerStruct* profiler, const char* op_name,
uint64_t elapsed_time, int64_t op_idx, int64_t subgraph_idx) {
return static_cast<MockTelemtryProfiler*>(profiler->data)
->ReportOpInvokeEvent(op_name, elapsed_time, op_idx,
subgraph_idx);
};
profiler_.reset(telemetry::MakeTfLiteTelemetryProfiler(&profiler_struct_));
}
MockTelemtryProfiler mock_profiler_;
std::unique_ptr<TelemetryProfiler> profiler_;
TfLiteContext context_;
TfLiteTelemetryProfilerStruct profiler_struct_;
};
TEST_F(TelemetryStructTest, TelemetryReportEvent) {
EXPECT_CALL(mock_profiler_,
ReportTelemetryEvent(kEventName, TelemetryStatusCode(kTfLiteOk)));
profiler_->ReportTelemetryEvent(kEventName, TelemetryStatusCode(kTfLiteOk));
}
TEST_F(TelemetryStructTest, TelemetryReportOpEvent) {
EXPECT_CALL(
mock_profiler_,
ReportTelemetryOpEvent(kEventName, 1, 2, TelemetryStatusCode(kTfLiteOk)));
profiler_->ReportTelemetryOpEvent(kEventName, 1, 2,
TelemetryStatusCode(kTfLiteOk));
}
TEST_F(TelemetryStructTest, TelemetryReportSettings) {
EXPECT_CALL(mock_profiler_, ReportSettings(kSettingName, testing::_));
TfLiteTelemetrySettings settings{};
profiler_->ReportSettings(kSettingName, &settings);
}
TEST_F(TelemetryStructTest, TelemetryReportBeginOpInvokeEvent) {
EXPECT_CALL(mock_profiler_, ReportBeginOpInvokeEvent(kSettingName, 1, 2));
profiler_->ReportBeginOpInvokeEvent(kSettingName, 1, 2);
}
TEST_F(TelemetryStructTest, TelemetryReportEndOpInvokeEvent) {
EXPECT_CALL(mock_profiler_, ReportEndOpInvokeEvent(1));
profiler_->ReportEndOpInvokeEvent(1);
}
TEST_F(TelemetryStructTest, TelemetryReportOpInvokeEvent) {
EXPECT_CALL(mock_profiler_, ReportOpInvokeEvent(kSettingName, 1, 2, 3));
profiler_->ReportOpInvokeEvent(kSettingName, 1, 2, 3);
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/telemetry/profiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/telemetry/profiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8caf7b1e-1994-4627-83bd-4a5595f8f5cf | cpp | tensorflow/tensorflow | global_pooling_to_reduce_op | tensorflow/lite/delegates/gpu/common/transformations/global_pooling_to_reduce_op.cc | tensorflow/lite/delegates/gpu/common/transformations/global_pooling_to_reduce_op_test.cc | #include "tensorflow/lite/delegates/gpu/common/transformations/global_pooling_to_reduce_op.h"
#include <any>
#include <memory>
#include <string>
#include <vector>
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
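// A pooling is "global" when its kernel spans the full input width and height, with no padding and a 1x1 spatial output.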
bool IsGlobalPooling(const Pooling2DAttributes& attr, const BHWC& src_shape,
const BHWC& dst_shape) {
return dst_shape.w == 1 && dst_shape.h == 1 && attr.kernel.w == src_shape.w &&
attr.kernel.h == src_shape.h && attr.padding.appended.w == 0 &&
attr.padding.appended.h == 0 && attr.padding.prepended.w == 0 &&
attr.padding.prepended.h == 0;
}
bool IsGlobalAveragePooling(const Pooling2DAttributes& attr,
const BHWC& src_shape, const BHWC& dst_shape) {
return attr.type == tflite::gpu::PoolingType::AVERAGE &&
attr.output_indices == false &&
IsGlobalPooling(attr, src_shape, dst_shape);
}
class GlobalPoolingToReduceOp : public NodeTransformation {
public:
TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final {
if (node->operation.type != ToString(OperationType::POOLING_2D)) {
return {TransformStatus::SKIPPED, ""};
}
auto inputs = graph->FindInputs(node->id);
auto outputs = graph->FindOutputs(node->id);
const auto& pool_attr =
std::any_cast<const Pooling2DAttributes&>(node->operation.attributes);
if (!IsGlobalAveragePooling(pool_attr, inputs[0]->tensor.shape,
outputs[0]->tensor.shape)) {
return {TransformStatus::SKIPPED, ""};
}
MeanAttributes mean_attr;
mean_attr.dims = {Axis::WIDTH, Axis::HEIGHT};
node->operation.attributes = mean_attr;
node->operation.type = ToString(OperationType::MEAN);
return {TransformStatus::APPLIED,
"Replaced global average pooling with mean."};
}
};
}  // namespace
std::unique_ptr<NodeTransformation> NewGlobalPoolingToReduceOp() {
return std::make_unique<GlobalPoolingToReduceOp>();
}
}  // namespace gpu
} | #include "tensorflow/lite/delegates/gpu/common/transformations/global_pooling_to_reduce_op.h"
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
TEST(MakeMeanFromGlobalAveragePooling, Smoke) {
GraphFloat32 graph;
auto input = graph.NewValue();
input->tensor.shape = BHWC(1, 4, 4, 8);
Pooling2DAttributes attr;
attr.padding.prepended = tflite::gpu::HW(0, 0);
attr.padding.appended = tflite::gpu::HW(0, 0);
attr.strides = tflite::gpu::HW(4, 4);
attr.kernel = tflite::gpu::HW(4, 4);
attr.type = tflite::gpu::PoolingType::AVERAGE;
attr.output_indices = false;
auto pool_node = graph.NewNode();
pool_node->operation.type = ToString(OperationType::POOLING_2D);
pool_node->operation.attributes = attr;
ASSERT_TRUE(graph.AddConsumer(pool_node->id, input->id).ok());
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, pool_node, &output).ok());
output->tensor.shape = BHWC(1, 1, 1, 8);
ASSERT_EQ(1, graph.nodes().size());
ASSERT_EQ(2, graph.values().size());
auto transformation = NewGlobalPoolingToReduceOp();
ModelTransformer transformer(&graph);
transformer.Apply("global_average_pooling_to_mean", transformation.get());
ASSERT_EQ(1, graph.nodes().size());
ASSERT_EQ(2, graph.values().size());
ASSERT_EQ(ToString(OperationType::MEAN), graph.nodes()[0]->operation.type);
}
}  // namespace
}  // namespace gpu
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/global_pooling_to_reduce_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/global_pooling_to_reduce_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5dcf037c-7363-4ecd-9b42-7d5c93368df1 | cpp | tensorflow/tensorflow | layout_to_xla_sharding | tensorflow/dtensor/cc/xla_spmd/layout_to_xla_sharding.cc | tensorflow/dtensor/tests/layout_to_xla_sharding_test.cc | #include "tensorflow/dtensor/cc/xla_spmd/layout_to_xla_sharding.h"
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/dtensor/cc/dstatus.h"
#include "tensorflow/dtensor/cc/tensor_layout.h"
namespace tensorflow {
namespace dtensor {
namespace {
void PopulateDevices(absl::Span<const int64_t> permutation,
absl::Span<const int64_t> sizes,
absl::Span<const int64_t> cum_sizes,
std::vector<int64_t>* out_devices, int64_t base = 0) {
int expanding_dim = permutation[0];
int expanding_dim_size = sizes[expanding_dim];
int expanding_cum_dim_size = cum_sizes[expanding_dim];
for (int i = 0; i < expanding_dim_size; ++i) {
if (permutation.size() == 1) {
out_devices->push_back(base + i * expanding_cum_dim_size);
} else {
PopulateDevices(permutation.subspan(1), sizes, cum_sizes, out_devices,
base + i * expanding_cum_dim_size);
}
}
}
}  // namespace
std::vector<int64_t> MeshMajorToMinor::ToDeviceList() {
std::vector<int64_t> cum_sizes(sizes.size());
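// cum_sizes[i] is the stride of mesh dimension i: the product of the sizes of all dimensions minor to it.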
int64_t cum_size = 1;
for (int i = sizes.size() - 1; i >= 0; --i) {
cum_sizes[i] = cum_size;
cum_size *= sizes[i];
}
std::vector<int64_t> devices;
devices.reserve(cum_size * sizes[0]);
PopulateDevices(permutation, sizes, cum_sizes, &devices);
return devices;
}
StatusOr<MeshMajorToMinor> ConvertMeshMajorToMinor(const Layout& layout,
const Mesh& mesh) {
MeshMajorToMinor major_to_minor;
major_to_minor.permutation.reserve(mesh.dims().size());
major_to_minor.sizes.reserve(mesh.dims().size());
absl::flat_hash_map<std::string, int64_t> dim_name_to_index_map;
for (const auto& [index, mesh_dim] : llvm::enumerate(mesh.dims())) {
major_to_minor.sizes.push_back(mesh_dim.size);
dim_name_to_index_map[mesh_dim.name] = index;
}
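// Mesh dimensions named in the layout's sharding specs come first, in spec order; unused mesh dimensions follow.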
for (const auto& spec : layout.sharding_spec_strs()) {
if (mesh.IsMeshDim(spec)) {
const auto it = dim_name_to_index_map.find(spec);
TF_RET_CHECK(it != dim_name_to_index_map.end());
const auto& dimension_index = it->second;
major_to_minor.permutation.push_back(dimension_index);
dim_name_to_index_map.erase(it);
}
}
for (const auto& [name, unused_size] : mesh.dims()) {
if (const auto it = dim_name_to_index_map.find(name);
it != dim_name_to_index_map.end()) {
const auto& dimension_index = it->second;
major_to_minor.permutation.push_back(dimension_index);
}
}
TF_RET_CHECK(major_to_minor.permutation.size() ==
major_to_minor.sizes.size());
return major_to_minor;
}
StatusOr<::xla::OpSharding> ConvertLayoutToXlaOpSharding(const Layout& layout) {
::xla::OpSharding xla_sharding;
if (layout.IsSingleDevice()) {
xla_sharding.set_type(::xla::OpSharding::MAXIMAL);
return xla_sharding;
} else if (layout.IsFullyReplicated()) {
xla_sharding.set_type(::xla::OpSharding::REPLICATED);
return xla_sharding;
}
xla_sharding.set_type(::xla::OpSharding::OTHER);
const Mesh& mesh = layout.mesh();
{
int32 product_of_sharded_dimensions = 1;
for (int32 dim_size : layout.num_shards()) {
product_of_sharded_dimensions *= dim_size;
xla_sharding.add_tile_assignment_dimensions(dim_size);
}
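// Devices not consumed by the sharded dimensions form a trailing replication dimension.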
if (product_of_sharded_dimensions != mesh.num_devices()) {
xla_sharding.add_tile_assignment_dimensions(
mesh.num_devices() / product_of_sharded_dimensions);
xla_sharding.set_replicate_on_last_tile_dim(true);
}
}
TF_ASSIGN_OR_RETURN(auto major_to_minor,
ConvertMeshMajorToMinor(layout, mesh));
std::vector<int64_t> tile_assignment_devices = major_to_minor.ToDeviceList();
*(xla_sharding.mutable_tile_assignment_devices()) = {
tile_assignment_devices.begin(), tile_assignment_devices.end()};
return xla_sharding;
}
}  // namespace dtensor
} | #include "tensorflow/dtensor/cc/xla_spmd/layout_to_xla_sharding.h"
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/strings/str_cat.h"
#include "benchmark/benchmark.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/dtensor/cc/dstatus.h"
#include "tensorflow/dtensor/cc/tensor_layout.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace dtensor {
namespace {
StatusOr<std::string> ConvertLayoutStrToHloShardingStr(std::string layout_str) {
TF_ASSIGN_OR_RETURN(const Layout layout, Layout::FromString(layout_str));
TF_ASSIGN_OR_RETURN(const xla::OpSharding op_sharding,
ConvertLayoutToXlaOpSharding(layout));
TF_ASSIGN_OR_RETURN(const auto hlo_sharding,
xla::HloSharding::FromProto(op_sharding));
return hlo_sharding.ToString();
}
TEST(LayoutToXLAShardingTest, ReplicatedLayout1D) {
std::string layout_str =
"sharding_specs:unsharded, "
"mesh:|x=2|0,1|0,1|/job:localhost/task:0/device:CPU:0,/job:localhost/"
"task:0/device:CPU:1";
TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
ConvertLayoutStrToHloShardingStr(layout_str));
EXPECT_EQ("{replicated}", sharding);
}
TEST(LayoutToXLAShardingTest, ReplicatedLayout2D) {
std::string layout_str =
"sharding_specs:unsharded,unsharded "
"mesh:|x=2,y=2|0,1,2,3|0,1,2,3|/job:localhost/task:0/device:CPU:0,/"
"job:localhost/"
"task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
"task:0/device:CPU:3";
TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
ConvertLayoutStrToHloShardingStr(layout_str));
EXPECT_EQ("{replicated}", sharding);
}
TEST(LayoutToXLAShardingTest, ReplicatedLayout3D) {
std::string layout_str =
"sharding_specs:unsharded,unsharded,unsharded, "
"mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/"
"device:CPU:0,/"
"job:localhost/"
"task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
"task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/"
"task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/"
"task:0/device:CPU:7";
TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
ConvertLayoutStrToHloShardingStr(layout_str));
EXPECT_EQ("{replicated}", sharding);
}
TEST(LayoutToXLAShardingTest, FullyShardedLayout1D) {
std::string layout_str =
"sharding_specs:x, "
"mesh:|x=3|0,1,2|0,1,2|/job:localhost/task:0/device:CPU:0,/job:localhost/"
"task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2";
TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
ConvertLayoutStrToHloShardingStr(layout_str));
EXPECT_EQ("{devices=[3]0,1,2}", sharding);
}
TEST(LayoutToXLAShardingTest, FullyShardedLayout2D) {
std::string layout_str =
"sharding_specs:x,y, "
"mesh:|x=2,y=2|0,1,2,3|0,1,2,3|/job:localhost/task:0/device:CPU:0,/"
"job:localhost/"
"task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
"task:0/device:CPU:3";
TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
ConvertLayoutStrToHloShardingStr(layout_str));
EXPECT_EQ("{devices=[2,2]0,1,2,3}", sharding);
}
TEST(LayoutToXLAShardingTest, FullyShardedLayout2DAsymmetricMesh) {
std::string layout_str =
"sharding_specs:y,x, "
"mesh:|x=2,y=4|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/"
"device:CPU:0,/job:localhost/task:0/device:CPU:1,/job:localhost/task:0/"
"device:CPU:2,/job:localhost/task:0/device:CPU:3,/job:localhost/task:0/"
"device:CPU:4,/job:localhost/task:0/device:CPU:5,/job:localhost/task:0/"
"device:CPU:6,/job:localhost/task:0/device:CPU:7";
TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
ConvertLayoutStrToHloShardingStr(layout_str));
EXPECT_EQ("{devices=[4,2]0,4,1,5,2,6,3,7}", sharding);
}
TEST(LayoutToXLAShardingTest, FullyShardedPermutedLayout2D) {
std::string layout_str =
"sharding_specs:y,x, "
"mesh:|x=2,y=2|0,1,2,3|0,1,2,3|/job:localhost/task:0/device:CPU:0,/"
"job:localhost/"
"task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
"task:0/device:CPU:3";
TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
ConvertLayoutStrToHloShardingStr(layout_str));
EXPECT_EQ("{devices=[2,2]0,2,1,3}", sharding);
}
TEST(LayoutToXLAShardingTest, FullyShardedLayout3D) {
std::string layout_str =
"sharding_specs:x,y,z, "
"mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/"
"device:CPU:0,/"
"job:localhost/"
"task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
"task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/"
"task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/"
"task:0/device:CPU:7";
TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
ConvertLayoutStrToHloShardingStr(layout_str));
EXPECT_EQ("{devices=[2,2,2]0,1,2,3,4,5,6,7}", sharding);
}
TEST(LayoutToXLAShardingTest, FullyShardedPermutedLayout3D_1) {
std::string layout_str =
"sharding_specs:z,x,y, "
"mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/"
"device:CPU:0,/"
"job:localhost/"
"task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
"task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/"
"task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/"
"task:0/device:CPU:7";
TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
ConvertLayoutStrToHloShardingStr(layout_str));
EXPECT_EQ("{devices=[2,2,2]0,2,4,6,1,3,5,7}", sharding);
}
TEST(LayoutToXLAShardingTest, FullyShardedPermutedLayout3D_2) {
std::string layout_str =
"sharding_specs:z,y,x, "
"mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/"
"device:CPU:0,/"
"job:localhost/"
"task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
"task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/"
"task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/"
"task:0/device:CPU:7";
TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
ConvertLayoutStrToHloShardingStr(layout_str));
EXPECT_EQ("{devices=[2,2,2]0,4,2,6,1,5,3,7}", sharding);
}
TEST(LayoutToXLAShardingTest, PartiallyShardedLayout2D) {
std::string layout_str =
"sharding_specs:x,unsharded, "
"mesh:|x=2,y=2|0,1,2,3|0,1,2,3|/job:localhost/task:0/device:CPU:0,/"
"job:localhost/"
"task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
"task:0/device:CPU:3";
TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
ConvertLayoutStrToHloShardingStr(layout_str));
EXPECT_EQ("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}", sharding);
}
TEST(LayoutToXLAShardingTest, PartiallyShardedPermutedLayout2D) {
std::string layout_str =
"sharding_specs:y,unsharded, "
"mesh:|x=2,y=2|0,1,2,3|0,1,2,3|/job:localhost/task:0/device:CPU:0,/"
"job:localhost/"
"task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
"task:0/device:CPU:3";
TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
ConvertLayoutStrToHloShardingStr(layout_str));
EXPECT_EQ("{devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}", sharding);
}
TEST(LayoutToXLAShardingTest, PartiallyShardedLayout3D_1) {
std::string layout_str =
"sharding_specs:x,y,unsharded, "
"mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/"
"device:CPU:0,/"
"job:localhost/"
"task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
"task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/"
"task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/"
"task:0/device:CPU:7";
TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
ConvertLayoutStrToHloShardingStr(layout_str));
EXPECT_EQ("{devices=[2,2,1,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate}",
sharding);
}
TEST(LayoutToXLAShardingTest, PartiallyShardedLayout3D_2) {
std::string layout_str =
"sharding_specs:x,unsharded,unsharded, "
"mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/"
"device:CPU:0,/"
"job:localhost/"
"task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
"task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/"
"task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/"
"task:0/device:CPU:7";
TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
ConvertLayoutStrToHloShardingStr(layout_str));
EXPECT_EQ("{devices=[2,1,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate}",
sharding);
}
TEST(LayoutToXLAShardingTest, PartiallyShardedPermutedLayout3D_1) {
std::string layout_str =
"sharding_specs:z,y,unsharded, "
"mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/"
"device:CPU:0,/"
"job:localhost/"
"task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
"task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/"
"task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/"
"task:0/device:CPU:7";
TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
ConvertLayoutStrToHloShardingStr(layout_str));
EXPECT_EQ("{devices=[2,2,1,2]0,4,2,6,1,5,3,7 last_tile_dim_replicate}",
sharding);
}
TEST(LayoutToXLAShardingTest, PartiallyShardedPermutedLayout3D_2) {
std::string layout_str =
"sharding_specs:y,unsharded,z, "
"mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/"
"device:CPU:0,/"
"job:localhost/"
"task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/"
"task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/"
"task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/"
"task:0/device:CPU:7";
TF_ASSERT_OK_AND_ASSIGN(std::string sharding,
ConvertLayoutStrToHloShardingStr(layout_str));
EXPECT_EQ("{devices=[2,1,2,2]0,4,1,5,2,6,3,7 last_tile_dim_replicate}",
sharding);
}
void BM_65536Devices(benchmark::State& state) {
std::vector<int64_t> device_ids(65536);
absl::c_iota(device_ids, 0);
std::vector<std::string> devices_str(65536);
absl::c_generate(devices_str, [n = 0]() mutable {
return absl::StrCat("/job:localhost/task:0/device:CPU:", n++);
});
auto mesh = Mesh::CreateMesh("", {"x", "y", "z"},
{8, 128, 64},
device_ids,
{},
device_ids,
devices_str);
TF_ASSERT_OK_AND_ASSIGN(auto layout,
Layout::GetLayout({"x", "y", "z"}, mesh));
for (auto s : state) {
TF_EXPECT_OK(ConvertLayoutToXlaOpSharding(layout).status());
}
}
BENCHMARK(BM_65536Devices);
}  // namespace
}  // namespace dtensor
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/dtensor/cc/xla_spmd/layout_to_xla_sharding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/dtensor/tests/layout_to_xla_sharding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bb9051f2-6655-44b1-ac5c-e4c26785b4a3 | cpp | tensorflow/tensorflow | call_once | tensorflow/lite/kernels/call_once.cc | tensorflow/lite/kernels/call_once_test.cc | #include <stddef.h>
#include <cstring>
#include <memory>
#include <vector>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/experimental/resource/initialization_status.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace call_once_kernel {
struct OpData {
int init_subgraph_index;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData;
const auto* params = reinterpret_cast<const TfLiteCallOnceParams*>(buffer);
op_data->init_subgraph_index = params->init_subgraph_index;
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
Subgraph* this_subgraph = reinterpret_cast<Subgraph*>(context->impl_);
resource::InitializationStatusMap* map =
&this_subgraph->initialization_status_map();
resource::InitializationStatus* status =
resource::GetInitializationStatus(map, op_data->init_subgraph_index);
if (status->IsInitialized()) return kTfLiteOk;
auto* subgraphs = this_subgraph->GetSubgraphs();
TF_LITE_ENSURE_EQ(context, node->inputs->size, 0);
TF_LITE_ENSURE_EQ(context, node->outputs->size, 0);
TF_LITE_ENSURE(context, op_data->init_subgraph_index < subgraphs->size());
Subgraph* init_subgraph = (*subgraphs)[op_data->init_subgraph_index].get();
TF_LITE_ENSURE_EQ(context, init_subgraph->inputs().size(), 0);
TF_LITE_ENSURE_EQ(context, init_subgraph->outputs().size(), 0);
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
Subgraph* this_subgraph = reinterpret_cast<Subgraph*>(context->impl_);
resource::InitializationStatusMap* map =
&this_subgraph->initialization_status_map();
resource::InitializationStatus* status =
resource::GetInitializationStatus(map, op_data->init_subgraph_index);
if (status->IsInitialized()) return kTfLiteOk;
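// First invocation: run the initialization subgraph once, then release its scratch memory.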
auto* subgraphs = this_subgraph->GetSubgraphs();
Subgraph& init_subgraph = *(*subgraphs)[op_data->init_subgraph_index];
TF_LITE_ENSURE_OK(context, init_subgraph.AllocateTensors());
TF_LITE_ENSURE_OK(context, init_subgraph.Invoke());
TF_LITE_ENSURE_OK(context, init_subgraph.ReleaseNonPersistentMemory());
status->MarkInitializationIsDone();
return kTfLiteOk;
}
}  // namespace call_once_kernel
TfLiteRegistration* Register_CALL_ONCE() {
static TfLiteRegistration r = {call_once_kernel::Init, call_once_kernel::Free,
call_once_kernel::Prepare,
call_once_kernel::Eval};
return &r;
}
}  // namespace builtin
}  // namespace ops
} | #include <stdint.h>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
namespace tflite {
using subgraph_test_util::ControlFlowOpTest;
namespace {
class CallOnceTest : public ControlFlowOpTest {
protected:
void SetUp() override {
AddSubgraphs(2);
builder_->BuildCallOnceAndReadVariableSubgraph(
&interpreter_->primary_subgraph());
builder_->BuildAssignRandomValueToVariableSubgraph(
interpreter_->subgraph(1));
builder_->BuildCallOnceAndReadVariablePlusOneSubgraph(
interpreter_->subgraph(2));
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter_->subgraph(2)->AllocateTensors(), kTfLiteOk);
}
};
TEST_F(CallOnceTest, TestSimple) {
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
ASSERT_EQ(output->dims->size, 1);
ASSERT_EQ(output->dims->data[0], 1);
ASSERT_EQ(output->type, kTfLiteInt32);
ASSERT_EQ(NumElements(output), 1);
EXPECT_GT(output->data.i32[0], 0);
}
TEST_F(CallOnceTest, TestInvokeMultipleTimes) {
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
ASSERT_EQ(output->dims->size, 1);
ASSERT_EQ(output->dims->data[0], 1);
ASSERT_EQ(output->type, kTfLiteInt32);
ASSERT_EQ(NumElements(output), 1);
int value = output->data.i32[0];
EXPECT_GT(value, 0);
for (int i = 0; i < 3; ++i) {
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
ASSERT_EQ(output->dims->size, 1);
ASSERT_EQ(output->dims->data[0], 1);
ASSERT_EQ(output->type, kTfLiteInt32);
ASSERT_EQ(NumElements(output), 1);
ASSERT_EQ(output->data.i32[0], value);
}
}
TEST_F(CallOnceTest, TestInvokeOnceAcrossMultipleEntryPoints) {
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
ASSERT_EQ(output->dims->size, 1);
ASSERT_EQ(output->dims->data[0], 1);
ASSERT_EQ(output->type, kTfLiteInt32);
ASSERT_EQ(NumElements(output), 1);
int value = output->data.i32[0];
EXPECT_GT(value, 0);
ASSERT_EQ(interpreter_->subgraph(2)->Invoke(), kTfLiteOk);
output = interpreter_->subgraph(2)->tensor(
interpreter_->subgraph(2)->outputs()[0]);
ASSERT_EQ(output->dims->size, 1);
ASSERT_EQ(output->dims->data[0], 1);
ASSERT_EQ(output->type, kTfLiteInt32);
ASSERT_EQ(NumElements(output), 1);
ASSERT_EQ(output->data.i32[0], value + 1);
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/call_once.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/call_once_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
098ee8f8-91fa-497d-8970-0aa185f2ecb9 | cpp | google/leveldb | table | table/table.cc | table/table_test.cc | #include "leveldb/table.h"
#include "leveldb/cache.h"
#include "leveldb/comparator.h"
#include "leveldb/env.h"
#include "leveldb/filter_policy.h"
#include "leveldb/options.h"
#include "table/block.h"
#include "table/filter_block.h"
#include "table/format.h"
#include "table/two_level_iterator.h"
#include "util/coding.h"
namespace leveldb {
struct Table::Rep {
~Rep() {
delete filter;
delete[] filter_data;
delete index_block;
}
Options options;
Status status;
RandomAccessFile* file;
uint64_t cache_id;
FilterBlockReader* filter;
const char* filter_data;
BlockHandle metaindex_handle;
Block* index_block;
};
Status Table::Open(const Options& options, RandomAccessFile* file,
uint64_t size, Table** table) {
*table = nullptr;
if (size < Footer::kEncodedLength) {
return Status::Corruption("file is too short to be an sstable");
}
char footer_space[Footer::kEncodedLength];
Slice footer_input;
Status s = file->Read(size - Footer::kEncodedLength, Footer::kEncodedLength,
&footer_input, footer_space);
if (!s.ok()) return s;
Footer footer;
s = footer.DecodeFrom(&footer_input);
if (!s.ok()) return s;
BlockContents index_block_contents;
ReadOptions opt;
if (options.paranoid_checks) {
opt.verify_checksums = true;
}
s = ReadBlock(file, opt, footer.index_handle(), &index_block_contents);
if (s.ok()) {
Block* index_block = new Block(index_block_contents);
Rep* rep = new Table::Rep;
rep->options = options;
rep->file = file;
rep->metaindex_handle = footer.metaindex_handle();
rep->index_block = index_block;
rep->cache_id = (options.block_cache ? options.block_cache->NewId() : 0);
rep->filter_data = nullptr;
rep->filter = nullptr;
*table = new Table(rep);
(*table)->ReadMeta(footer);
}
return s;
}
void Table::ReadMeta(const Footer& footer) {
if (rep_->options.filter_policy == nullptr) {
return;
}
ReadOptions opt;
if (rep_->options.paranoid_checks) {
opt.verify_checksums = true;
}
BlockContents contents;
if (!ReadBlock(rep_->file, opt, footer.metaindex_handle(), &contents).ok()) {
return;
}
Block* meta = new Block(contents);
Iterator* iter = meta->NewIterator(BytewiseComparator());
std::string key = "filter.";
key.append(rep_->options.filter_policy->Name());
iter->Seek(key);
if (iter->Valid() && iter->key() == Slice(key)) {
ReadFilter(iter->value());
}
delete iter;
delete meta;
}
void Table::ReadFilter(const Slice& filter_handle_value) {
Slice v = filter_handle_value;
BlockHandle filter_handle;
if (!filter_handle.DecodeFrom(&v).ok()) {
return;
}
ReadOptions opt;
if (rep_->options.paranoid_checks) {
opt.verify_checksums = true;
}
BlockContents block;
if (!ReadBlock(rep_->file, opt, filter_handle, &block).ok()) {
return;
}
if (block.heap_allocated) {
rep_->filter_data = block.data.data();
}
rep_->filter = new FilterBlockReader(rep_->options.filter_policy, block.data);
}
Table::~Table() { delete rep_; }
static void DeleteBlock(void* arg, void* ignored) {
delete reinterpret_cast<Block*>(arg);
}
static void DeleteCachedBlock(const Slice& key, void* value) {
Block* block = reinterpret_cast<Block*>(value);
delete block;
}
static void ReleaseBlock(void* arg, void* h) {
Cache* cache = reinterpret_cast<Cache*>(arg);
Cache::Handle* handle = reinterpret_cast<Cache::Handle*>(h);
cache->Release(handle);
}
Iterator* Table::BlockReader(void* arg, const ReadOptions& options,
const Slice& index_value) {
Table* table = reinterpret_cast<Table*>(arg);
Cache* block_cache = table->rep_->options.block_cache;
Block* block = nullptr;
Cache::Handle* cache_handle = nullptr;
BlockHandle handle;
Slice input = index_value;
Status s = handle.DecodeFrom(&input);
if (s.ok()) {
BlockContents contents;
if (block_cache != nullptr) {
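// The cache key is the table's unique cache id followed by the block offset, two fixed64s.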
char cache_key_buffer[16];
EncodeFixed64(cache_key_buffer, table->rep_->cache_id);
EncodeFixed64(cache_key_buffer + 8, handle.offset());
Slice key(cache_key_buffer, sizeof(cache_key_buffer));
cache_handle = block_cache->Lookup(key);
if (cache_handle != nullptr) {
block = reinterpret_cast<Block*>(block_cache->Value(cache_handle));
} else {
s = ReadBlock(table->rep_->file, options, handle, &contents);
if (s.ok()) {
block = new Block(contents);
if (contents.cachable && options.fill_cache) {
cache_handle = block_cache->Insert(key, block, block->size(),
&DeleteCachedBlock);
}
}
}
} else {
s = ReadBlock(table->rep_->file, options, handle, &contents);
if (s.ok()) {
block = new Block(contents);
}
}
}
Iterator* iter;
if (block != nullptr) {
iter = block->NewIterator(table->rep_->options.comparator);
if (cache_handle == nullptr) {
iter->RegisterCleanup(&DeleteBlock, block, nullptr);
} else {
iter->RegisterCleanup(&ReleaseBlock, block_cache, cache_handle);
}
} else {
iter = NewErrorIterator(s);
}
return iter;
}
Iterator* Table::NewIterator(const ReadOptions& options) const {
return NewTwoLevelIterator(
rep_->index_block->NewIterator(rep_->options.comparator),
&Table::BlockReader, const_cast<Table*>(this), options);
}
Status Table::InternalGet(const ReadOptions& options, const Slice& k, void* arg,
void (*handle_result)(void*, const Slice&,
const Slice&)) {
Status s;
Iterator* iiter = rep_->index_block->NewIterator(rep_->options.comparator);
iiter->Seek(k);
if (iiter->Valid()) {
Slice handle_value = iiter->value();
FilterBlockReader* filter = rep_->filter;
BlockHandle handle;
if (filter != nullptr && handle.DecodeFrom(&handle_value).ok() &&
!filter->KeyMayMatch(handle.offset(), k)) {
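// Not found: the filter rules out the key, so skip reading the data block.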
} else {
Iterator* block_iter = BlockReader(this, options, iiter->value());
block_iter->Seek(k);
if (block_iter->Valid()) {
(*handle_result)(arg, block_iter->key(), block_iter->value());
}
s = block_iter->status();
delete block_iter;
}
}
if (s.ok()) {
s = iiter->status();
}
delete iiter;
return s;
}
uint64_t Table::ApproximateOffsetOf(const Slice& key) const {
Iterator* index_iter =
rep_->index_block->NewIterator(rep_->options.comparator);
index_iter->Seek(key);
uint64_t result;
if (index_iter->Valid()) {
BlockHandle handle;
Slice input = index_iter->value();
Status s = handle.DecodeFrom(&input);
if (s.ok()) {
result = handle.offset();
} else {
result = rep_->metaindex_handle.offset();
}
} else {
result = rep_->metaindex_handle.offset();
}
delete index_iter;
return result;
}
} | #include "leveldb/table.h"
#include <map>
#include <string>
#include "gtest/gtest.h"
#include "db/dbformat.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/iterator.h"
#include "leveldb/options.h"
#include "leveldb/table_builder.h"
#include "table/block.h"
#include "table/block_builder.h"
#include "table/format.h"
#include "util/random.h"
#include "util/testutil.h"
namespace leveldb {
static std::string Reverse(const Slice& key) {
std::string str(key.ToString());
std::string rev("");
for (std::string::reverse_iterator rit = str.rbegin(); rit != str.rend();
++rit) {
rev.push_back(*rit);
}
return rev;
}
namespace {
class ReverseKeyComparator : public Comparator {
public:
const char* Name() const override {
return "leveldb.ReverseBytewiseComparator";
}
int Compare(const Slice& a, const Slice& b) const override {
return BytewiseComparator()->Compare(Reverse(a), Reverse(b));
}
void FindShortestSeparator(std::string* start,
const Slice& limit) const override {
std::string s = Reverse(*start);
std::string l = Reverse(limit);
BytewiseComparator()->FindShortestSeparator(&s, l);
*start = Reverse(s);
}
void FindShortSuccessor(std::string* key) const override {
std::string s = Reverse(*key);
BytewiseComparator()->FindShortSuccessor(&s);
*key = Reverse(s);
}
};
}  // namespace
static ReverseKeyComparator reverse_key_comparator;
static void Increment(const Comparator* cmp, std::string* key) {
if (cmp == BytewiseComparator()) {
key->push_back('\0');
} else {
assert(cmp == &reverse_key_comparator);
std::string rev = Reverse(*key);
rev.push_back('\0');
*key = Reverse(rev);
}
}
namespace {
struct STLLessThan {
const Comparator* cmp;
STLLessThan() : cmp(BytewiseComparator()) {}
STLLessThan(const Comparator* c) : cmp(c) {}
bool operator()(const std::string& a, const std::string& b) const {
return cmp->Compare(Slice(a), Slice(b)) < 0;
}
};
}  // namespace
class StringSink : public WritableFile {
public:
~StringSink() override = default;
const std::string& contents() const { return contents_; }
Status Close() override { return Status::OK(); }
Status Flush() override { return Status::OK(); }
Status Sync() override { return Status::OK(); }
Status Append(const Slice& data) override {
contents_.append(data.data(), data.size());
return Status::OK();
}
private:
std::string contents_;
};
class StringSource : public RandomAccessFile {
public:
StringSource(const Slice& contents)
: contents_(contents.data(), contents.size()) {}
~StringSource() override = default;
uint64_t Size() const { return contents_.size(); }
Status Read(uint64_t offset, size_t n, Slice* result,
char* scratch) const override {
if (offset >= contents_.size()) {
return Status::InvalidArgument("invalid Read offset");
}
if (offset + n > contents_.size()) {
n = contents_.size() - offset;
}
std::memcpy(scratch, &contents_[offset], n);
*result = Slice(scratch, n);
return Status::OK();
}
private:
std::string contents_;
};
typedef std::map<std::string, std::string, STLLessThan> KVMap;
class Constructor {
public:
explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) {}
virtual ~Constructor() = default;
void Add(const std::string& key, const Slice& value) {
data_[key] = value.ToString();
}
void Finish(const Options& options, std::vector<std::string>* keys,
KVMap* kvmap) {
*kvmap = data_;
keys->clear();
for (const auto& kvp : data_) {
keys->push_back(kvp.first);
}
data_.clear();
Status s = FinishImpl(options, *kvmap);
ASSERT_TRUE(s.ok()) << s.ToString();
}
virtual Status FinishImpl(const Options& options, const KVMap& data) = 0;
virtual Iterator* NewIterator() const = 0;
const KVMap& data() const { return data_; }
virtual DB* db() const { return nullptr; }
private:
KVMap data_;
};
class BlockConstructor : public Constructor {
public:
explicit BlockConstructor(const Comparator* cmp)
: Constructor(cmp), comparator_(cmp), block_(nullptr) {}
~BlockConstructor() override { delete block_; }
Status FinishImpl(const Options& options, const KVMap& data) override {
delete block_;
block_ = nullptr;
BlockBuilder builder(&options);
for (const auto& kvp : data) {
builder.Add(kvp.first, kvp.second);
}
data_ = builder.Finish().ToString();
BlockContents contents;
contents.data = data_;
contents.cachable = false;
contents.heap_allocated = false;
block_ = new Block(contents);
return Status::OK();
}
Iterator* NewIterator() const override {
return block_->NewIterator(comparator_);
}
private:
const Comparator* const comparator_;
std::string data_;
Block* block_;
BlockConstructor();
};
class TableConstructor : public Constructor {
public:
TableConstructor(const Comparator* cmp)
: Constructor(cmp), source_(nullptr), table_(nullptr) {}
~TableConstructor() override { Reset(); }
Status FinishImpl(const Options& options, const KVMap& data) override {
Reset();
StringSink sink;
TableBuilder builder(options, &sink);
for (const auto& kvp : data) {
builder.Add(kvp.first, kvp.second);
EXPECT_LEVELDB_OK(builder.status());
}
Status s = builder.Finish();
EXPECT_LEVELDB_OK(s);
EXPECT_EQ(sink.contents().size(), builder.FileSize());
source_ = new StringSource(sink.contents());
Options table_options;
table_options.comparator = options.comparator;
return Table::Open(table_options, source_, sink.contents().size(), &table_);
}
Iterator* NewIterator() const override {
return table_->NewIterator(ReadOptions());
}
uint64_t ApproximateOffsetOf(const Slice& key) const {
return table_->ApproximateOffsetOf(key);
}
private:
void Reset() {
delete table_;
delete source_;
table_ = nullptr;
source_ = nullptr;
}
StringSource* source_;
Table* table_;
TableConstructor();
};
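// Wraps an internal-key iterator (e.g. a memtable's): Seek() encodes user
// keys into internal keys, and key() strips the sequence/type tag back off
// so the harness can compare plain user keys.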
class KeyConvertingIterator : public Iterator {
public:
explicit KeyConvertingIterator(Iterator* iter) : iter_(iter) {}
KeyConvertingIterator(const KeyConvertingIterator&) = delete;
KeyConvertingIterator& operator=(const KeyConvertingIterator&) = delete;
~KeyConvertingIterator() override { delete iter_; }
bool Valid() const override { return iter_->Valid(); }
void Seek(const Slice& target) override {
ParsedInternalKey ikey(target, kMaxSequenceNumber, kTypeValue);
std::string encoded;
AppendInternalKey(&encoded, ikey);
iter_->Seek(encoded);
}
void SeekToFirst() override { iter_->SeekToFirst(); }
void SeekToLast() override { iter_->SeekToLast(); }
void Next() override { iter_->Next(); }
void Prev() override { iter_->Prev(); }
Slice key() const override {
assert(Valid());
ParsedInternalKey key;
if (!ParseInternalKey(iter_->key(), &key)) {
status_ = Status::Corruption("malformed internal key");
return Slice("corrupted key");
}
return key.user_key;
}
Slice value() const override { return iter_->value(); }
Status status() const override {
return status_.ok() ? iter_->status() : status_;
}
private:
mutable Status status_;
Iterator* iter_;
};
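// Re-inserts the data into a fresh MemTable with increasing sequence
// numbers; iteration goes through KeyConvertingIterator to recover the
// user keys.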
class MemTableConstructor : public Constructor {
public:
explicit MemTableConstructor(const Comparator* cmp)
: Constructor(cmp), internal_comparator_(cmp) {
memtable_ = new MemTable(internal_comparator_);
memtable_->Ref();
}
~MemTableConstructor() override { memtable_->Unref(); }
Status FinishImpl(const Options& options, const KVMap& data) override {
memtable_->Unref();
memtable_ = new MemTable(internal_comparator_);
memtable_->Ref();
int seq = 1;
for (const auto& kvp : data) {
memtable_->Add(seq, kTypeValue, kvp.first, kvp.second);
seq++;
}
return Status::OK();
}
Iterator* NewIterator() const override {
return new KeyConvertingIterator(memtable_->NewIterator());
}
private:
const InternalKeyComparator internal_comparator_;
MemTable* memtable_;
};
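// Heaviest constructor: writes every pair through a real DB in a temporary
// directory, so the scan tests also cover the full write/read stack.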
class DBConstructor : public Constructor {
public:
explicit DBConstructor(const Comparator* cmp)
: Constructor(cmp), comparator_(cmp) {
db_ = nullptr;
NewDB();
}
~DBConstructor() override { delete db_; }
Status FinishImpl(const Options& options, const KVMap& data) override {
delete db_;
db_ = nullptr;
NewDB();
for (const auto& kvp : data) {
WriteBatch batch;
batch.Put(kvp.first, kvp.second);
EXPECT_TRUE(db_->Write(WriteOptions(), &batch).ok());
}
return Status::OK();
}
Iterator* NewIterator() const override {
return db_->NewIterator(ReadOptions());
}
DB* db() const override { return db_; }
private:
void NewDB() {
std::string name = testing::TempDir() + "table_testdb";
Options options;
options.comparator = comparator_;
Status status = DestroyDB(name, options);
ASSERT_TRUE(status.ok()) << status.ToString();
options.create_if_missing = true;
options.error_if_exists = true;
options.write_buffer_size = 10000;
status = DB::Open(options, name, &db_);
ASSERT_TRUE(status.ok()) << status.ToString();
}
const Comparator* const comparator_;
DB* db_;
};
enum TestType { TABLE_TEST, BLOCK_TEST, MEMTABLE_TEST, DB_TEST };
struct TestArgs {
TestType type;
bool reverse_compare;
int restart_interval;
};
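// Every store type is exercised with both comparator directions; block and
// table variants additionally sweep small, default, and large restart
// intervals, since restart points affect block seek behavior.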
static const TestArgs kTestArgList[] = {
{TABLE_TEST, false, 16},
{TABLE_TEST, false, 1},
{TABLE_TEST, false, 1024},
{TABLE_TEST, true, 16},
{TABLE_TEST, true, 1},
{TABLE_TEST, true, 1024},
{BLOCK_TEST, false, 16},
{BLOCK_TEST, false, 1},
{BLOCK_TEST, false, 1024},
{BLOCK_TEST, true, 16},
{BLOCK_TEST, true, 1},
{BLOCK_TEST, true, 1024},
{MEMTABLE_TEST, false, 16},
{MEMTABLE_TEST, true, 16},
{DB_TEST, false, 16},
{DB_TEST, true, 16},
};
static const int kNumTestArgs = sizeof(kTestArgList) / sizeof(kTestArgList[0]);
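// Parameterized scan harness: Init() builds the constructor named by
// TestArgs, and Test() runs forward scans, backward scans, and a random
// mix of seeks against the in-memory KVMap model.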
class Harness : public testing::Test {
public:
Harness() : constructor_(nullptr) {}
void Init(const TestArgs& args) {
delete constructor_;
constructor_ = nullptr;
options_ = Options();
options_.block_restart_interval = args.restart_interval;
options_.block_size = 256;
if (args.reverse_compare) {
options_.comparator = &reverse_key_comparator;
}
switch (args.type) {
case TABLE_TEST:
constructor_ = new TableConstructor(options_.comparator);
break;
case BLOCK_TEST:
constructor_ = new BlockConstructor(options_.comparator);
break;
case MEMTABLE_TEST:
constructor_ = new MemTableConstructor(options_.comparator);
break;
case DB_TEST:
constructor_ = new DBConstructor(options_.comparator);
break;
}
}
~Harness() { delete constructor_; }
void Add(const std::string& key, const std::string& value) {
constructor_->Add(key, value);
}
void Test(Random* rnd) {
std::vector<std::string> keys;
KVMap data;
constructor_->Finish(options_, &keys, &data);
TestForwardScan(keys, data);
TestBackwardScan(keys, data);
TestRandomAccess(rnd, keys, data);
}
void TestForwardScan(const std::vector<std::string>& keys,
const KVMap& data) {
Iterator* iter = constructor_->NewIterator();
ASSERT_TRUE(!iter->Valid());
iter->SeekToFirst();
for (KVMap::const_iterator model_iter = data.begin();
model_iter != data.end(); ++model_iter) {
ASSERT_EQ(ToString(data, model_iter), ToString(iter));
iter->Next();
}
ASSERT_TRUE(!iter->Valid());
delete iter;
}
void TestBackwardScan(const std::vector<std::string>& keys,
const KVMap& data) {
Iterator* iter = constructor_->NewIterator();
ASSERT_TRUE(!iter->Valid());
iter->SeekToLast();
for (KVMap::const_reverse_iterator model_iter = data.rbegin();
model_iter != data.rend(); ++model_iter) {
ASSERT_EQ(ToString(data, model_iter), ToString(iter));
iter->Prev();
}
ASSERT_TRUE(!iter->Valid());
delete iter;
}
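// Random walk over Next/Prev/Seek/SeekToFirst/SeekToLast, advancing a
// model iterator over the KVMap in lockstep and asserting that the real
// iterator and the model agree after every step.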
void TestRandomAccess(Random* rnd, const std::vector<std::string>& keys,
const KVMap& data) {
static const bool kVerbose = false;
Iterator* iter = constructor_->NewIterator();
ASSERT_TRUE(!iter->Valid());
KVMap::const_iterator model_iter = data.begin();
if (kVerbose) std::fprintf(stderr, "---\n");
for (int i = 0; i < 200; i++) {
const int toss = rnd->Uniform(5);
switch (toss) {
case 0: {
if (iter->Valid()) {
if (kVerbose) std::fprintf(stderr, "Next\n");
iter->Next();
++model_iter;
ASSERT_EQ(ToString(data, model_iter), ToString(iter));
}
break;
}
case 1: {
if (kVerbose) std::fprintf(stderr, "SeekToFirst\n");
iter->SeekToFirst();
model_iter = data.begin();
ASSERT_EQ(ToString(data, model_iter), ToString(iter));
break;
}
case 2: {
std::string key = PickRandomKey(rnd, keys);
model_iter = data.lower_bound(key);
if (kVerbose)
std::fprintf(stderr, "Seek '%s'\n", EscapeString(key).c_str());
iter->Seek(Slice(key));
ASSERT_EQ(ToString(data, model_iter), ToString(iter));
break;
}
case 3: {
if (iter->Valid()) {
if (kVerbose) std::fprintf(stderr, "Prev\n");
iter->Prev();
if (model_iter == data.begin()) {
model_iter = data.end();
} else {
--model_iter;
}
ASSERT_EQ(ToString(data, model_iter), ToString(iter));
}
break;
}
case 4: {
if (kVerbose) std::fprintf(stderr, "SeekToLast\n");
iter->SeekToLast();
if (keys.empty()) {
model_iter = data.end();
} else {
std::string last = data.rbegin()->first;
model_iter = data.lower_bound(last);
}
ASSERT_EQ(ToString(data, model_iter), ToString(iter));
break;
}
}
}
delete iter;
}
std::string ToString(const KVMap& data, const KVMap::const_iterator& it) {
if (it == data.end()) {
return "END";
} else {
return "'" + it->first + "->" + it->second + "'";
}
}
std::string ToString(const KVMap& data,
const KVMap::const_reverse_iterator& it) {
if (it == data.rend()) {
return "END";
} else {
return "'" + it->first + "->" + it->second + "'";
}
}
std::string ToString(const Iterator* it) {
if (!it->Valid()) {
return "END";
} else {
return "'" + it->key().ToString() + "->" + it->value().ToString() + "'";
}
}
std::string PickRandomKey(Random* rnd, const std::vector<std::string>& keys) {
if (keys.empty()) {
return "foo";
} else {
const int index = rnd->Uniform(keys.size());
std::string result = keys[index];
switch (rnd->Uniform(3)) {
case 0:
break;
case 1: {
if (!result.empty() && result[result.size() - 1] > '\0') {
result[result.size() - 1]--;
}
break;
}
case 2: {
Increment(options_.comparator, &result);
break;
}
}
return result;
}
}
DB* db() const { return constructor_->db(); }
private:
Options options_;
Constructor* constructor_;
};
TEST_F(Harness, Empty) {
for (int i = 0; i < kNumTestArgs; i++) {
Init(kTestArgList[i]);
Random rnd(test::RandomSeed() + 1);
Test(&rnd);
}
}
TEST_F(Harness, ZeroRestartPointsInBlock) {
char data[sizeof(uint32_t)];
memset(data, 0, sizeof(data));
BlockContents contents;
contents.data = Slice(data, sizeof(data));
contents.cachable = false;
contents.heap_allocated = false;
Block block(contents);
Iterator* iter = block.NewIterator(BytewiseComparator());
iter->SeekToFirst();
ASSERT_TRUE(!iter->Valid());
iter->SeekToLast();
ASSERT_TRUE(!iter->Valid());
iter->Seek("foo");
ASSERT_TRUE(!iter->Valid());
delete iter;
}
TEST_F(Harness, SimpleEmptyKey) {
for (int i = 0; i < kNumTestArgs; i++) {
Init(kTestArgList[i]);
Random rnd(test::RandomSeed() + 1);
Add("", "v");
Test(&rnd);
}
}
TEST_F(Harness, SimpleSingle) {
for (int i = 0; i < kNumTestArgs; i++) {
Init(kTestArgList[i]);
Random rnd(test::RandomSeed() + 2);
Add("abc", "v");
Test(&rnd);
}
}
TEST_F(Harness, SimpleMulti) {
for (int i = 0; i < kNumTestArgs; i++) {
Init(kTestArgList[i]);
Random rnd(test::RandomSeed() + 3);
Add("abc", "v");
Add("abcd", "v");
Add("ac", "v2");
Test(&rnd);
}
}
TEST_F(Harness, SimpleSpecialKey) {
for (int i = 0; i < kNumTestArgs; i++) {
Init(kTestArgList[i]);
Random rnd(test::RandomSeed() + 4);
Add("\xff\xff", "v3");
Test(&rnd);
}
}
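// Stress test: random keys and values at growing entry counts, across
// every TestArgs combination, re-running the full scan suite after each
// Finish().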
TEST_F(Harness, Randomized) {
for (int i = 0; i < kNumTestArgs; i++) {
Init(kTestArgList[i]);
Random rnd(test::RandomSeed() + 5);
for (int num_entries = 0; num_entries < 2000;
num_entries += (num_entries < 50 ? 1 : 200)) {
if ((num_entries % 10) == 0) {
std::fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1),
int(kNumTestArgs), num_entries);
}
for (int e = 0; e < num_entries; e++) {
std::string v;
Add(test::RandomKey(&rnd, rnd.Skewed(4)),
test::RandomString(&rnd, rnd.Skewed(5), &v).ToString());
}
Test(&rnd);
}
}
}
TEST_F(Harness, RandomizedLongDB) {
Random rnd(test::RandomSeed());
TestArgs args = {DB_TEST, false, 16};
Init(args);
int num_entries = 100000;
for (int e = 0; e < num_entries; e++) {
std::string v;
Add(test::RandomKey(&rnd, rnd.Skewed(4)),
test::RandomString(&rnd, rnd.Skewed(5), &v).ToString());
}
Test(&rnd);
int files = 0;
for (int level = 0; level < config::kNumLevels; level++) {
std::string value;
char name[100];
std::snprintf(name, sizeof(name), "leveldb.num-files-at-level%d", level);
ASSERT_TRUE(db()->GetProperty(name, &value));
files += atoi(value.c_str());
}
ASSERT_GT(files, 0);
}
TEST(MemTableTest, Simple) {
InternalKeyComparator cmp(BytewiseComparator());
MemTable* memtable = new MemTable(cmp);
memtable->Ref();
WriteBatch batch;
WriteBatchInternal::SetSequence(&batch, 100);
batch.Put(std::string("k1"), std::string("v1"));
batch.Put(std::string("k2"), std::string("v2"));
batch.Put(std::string("k3"), std::string("v3"));
batch.Put(std::string("largekey"), std::string("vlarge"));
ASSERT_TRUE(WriteBatchInternal::InsertInto(&batch, memtable).ok());
Iterator* iter = memtable->NewIterator();
iter->SeekToFirst();
while (iter->Valid()) {
std::fprintf(stderr, "key: '%s' -> '%s'\n", iter->key().ToString().c_str(),
iter->value().ToString().c_str());
iter->Next();
}
delete iter;
memtable->Unref();
}
static bool Between(uint64_t val, uint64_t low, uint64_t high) {
bool result = (val >= low) && (val <= high);
if (!result) {
std::fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
(unsigned long long)(val), (unsigned long long)(low),
(unsigned long long)(high));
}
return result;
}
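// With compression disabled, ApproximateOffsetOf should track the
// cumulative size of the preceding values to within roughly one 1KB block.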
TEST(TableTest, ApproximateOffsetOfPlain) {
TableConstructor c(BytewiseComparator());
c.Add("k01", "hello");
c.Add("k02", "hello2");
c.Add("k03", std::string(10000, 'x'));
c.Add("k04", std::string(200000, 'x'));
c.Add("k05", std::string(300000, 'x'));
c.Add("k06", "hello3");
c.Add("k07", std::string(100000, 'x'));
std::vector<std::string> keys;
KVMap kvmap;
Options options;
options.block_size = 1024;
options.compression = kNoCompression;
c.Finish(options, &keys, &kvmap);
ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 0, 0));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04a"), 210000, 211000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000));
}
static bool CompressionSupported(CompressionType type) {
std::string out;
Slice in = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
if (type == kSnappyCompression) {
return port::Snappy_Compress(in.data(), in.size(), &out);
} else if (type == kZstdCompression) {
return port::Zstd_Compress(1, in.data(), in.size(), &out);
}
return false;
}
class CompressionTableTest
: public ::testing::TestWithParam<std::tuple<CompressionType>> {};
INSTANTIATE_TEST_SUITE_P(CompressionTests, CompressionTableTest,
::testing::Values(kSnappyCompression,
kZstdCompression));
TEST_P(CompressionTableTest, ApproximateOffsetOfCompressed) {
CompressionType type = ::testing::get<0>(GetParam());
if (!CompressionSupported(type)) {
GTEST_SKIP() << "skipping compression test: " << type;
}
Random rnd(301);
TableConstructor c(BytewiseComparator());
std::string tmp;
c.Add("k01", "hello");
c.Add("k02", test::CompressibleString(&rnd, 0.25, 10000, &tmp));
c.Add("k03", "hello3");
c.Add("k04", test::CompressibleString(&rnd, 0.25, 10000, &tmp));
std::vector<std::string> keys;
KVMap kvmap;
Options options;
options.block_size = 1024;
options.compression = type;
c.Finish(options, &keys, &kvmap);
static const int kSlop = 1000;
const int expected = 2500;
const int min_z = expected - kSlop;
const int max_z = expected + kSlop;
ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, kSlop));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, kSlop));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, kSlop));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), min_z, max_z));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), min_z, max_z));
ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 2 * min_z, 2 * max_z));
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/table/table.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/table/table_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
e71a6411-3f88-4818-900f-d9de1dcb9e52 | cpp | tensorflow/tensorflow | xla_activity_listener | tensorflow/compiler/jit/xla_activity_listener.cc | tensorflow/compiler/jit/xla_activity_listener_test.cc | #include "tensorflow/compiler/jit/xla_activity_listener.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/compiler/jit/xla_activity.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/thread_annotations.h"
namespace tensorflow {
namespace {
struct XlaActivityListenerList {
absl::Mutex mutex;
std::vector<std::unique_ptr<XlaActivityListener>> listeners
TF_GUARDED_BY(mutex);
};
void FlushAllListeners();
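// The listener list is an intentionally leaked singleton so listeners stay
// valid through process shutdown; the atexit hook flushes them instead of
// relying on static destructor order.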
XlaActivityListenerList* GetXlaActivityListenerList() {
static XlaActivityListenerList* listener_list = new XlaActivityListenerList;
static int unused = std::atexit(FlushAllListeners);
(void)unused;
return listener_list;
}
template <typename FnTy>
Status ForEachListener(FnTy fn) {
XlaActivityListenerList* listener_list = GetXlaActivityListenerList();
absl::ReaderMutexLock reader_lock(&listener_list->mutex);
for (const std::unique_ptr<XlaActivityListener>& listener :
listener_list->listeners) {
TF_RETURN_IF_ERROR(fn(listener.get()));
}
return absl::OkStatus();
}
void FlushAllListeners() {
Status s = ForEachListener([](XlaActivityListener* listener) {
listener->Flush();
return absl::OkStatus();
});
CHECK(s.ok());
}
}
Status BroadcastXlaActivity(
XlaAutoClusteringActivity auto_clustering_activity) {
return ForEachListener([&](XlaActivityListener* listener) {
return listener->Listen(auto_clustering_activity);
});
}
Status BroadcastXlaActivity(
XlaJitCompilationActivity jit_compilation_activity) {
return ForEachListener([&](XlaActivityListener* listener) {
return listener->Listen(jit_compilation_activity);
});
}
Status BroadcastOptimizationRemark(XlaOptimizationRemark optimization_remark) {
VLOG(2) << "OptimizationRemark: " << optimization_remark.DebugString();
return ForEachListener([&](XlaActivityListener* listener) {
return listener->Listen(optimization_remark);
});
}
Status BroadcastOptimizationRemark(
XlaOptimizationRemark::Warning optimization_warning,
string debug_information) {
XlaOptimizationRemark remark;
remark.set_warning(optimization_warning);
remark.set_debug_information(std::move(debug_information));
return BroadcastOptimizationRemark(std::move(remark));
}
void RegisterXlaActivityListener(
std::unique_ptr<XlaActivityListener> listener) {
XlaActivityListenerList* listener_list = GetXlaActivityListenerList();
absl::WriterMutexLock writer_lock(&listener_list->mutex);
listener_list->listeners.push_back(std::move(listener));
}
void XlaActivityListener::Flush() {}
XlaActivityListener::~XlaActivityListener() {}
} | #include "tensorflow/compiler/jit/xla_activity_listener.h"
#include <cstdlib>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/list_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/core/common_runtime/direct_session.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class TestListener : public XlaActivityListener {
public:
Status Listen(
const XlaAutoClusteringActivity& auto_clustering_activity) override {
auto_clustering_activity_ = auto_clustering_activity;
return absl::OkStatus();
}
Status Listen(
const XlaJitCompilationActivity& jit_compilation_activity) override {
jit_compilation_activity_ = jit_compilation_activity;
return absl::OkStatus();
}
Status Listen(const XlaOptimizationRemark& optimization_remark) override {
return absl::OkStatus();
}
~TestListener() override {}
const XlaAutoClusteringActivity& auto_clustering_activity() const {
return auto_clustering_activity_;
}
const XlaJitCompilationActivity& jit_compilation_activity() const {
return jit_compilation_activity_;
}
private:
XlaAutoClusteringActivity auto_clustering_activity_;
XlaJitCompilationActivity jit_compilation_activity_;
};
class XlaActivityListenerTest : public ::testing::Test {
protected:
XlaActivityListenerTest() {
auto listener = std::make_unique<TestListener>();
listener_ = listener.get();
RegisterXlaActivityListener(std::move(listener));
}
TestListener* listener() const { return listener_; }
private:
TestListener* listener_;
};
GraphDef CreateGraphDef() {
Scope root = Scope::NewRootScope().ExitOnError().WithAssignedDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
Output a = ops::Placeholder(root.WithOpName("A"), DT_FLOAT);
for (int i = 0; i < 5; i++) {
a = ops::MatMul(root.WithOpName(absl::StrCat("matmul_", i)), a, a);
a = ops::Add(root.WithOpName(absl::StrCat("add_", i)), a, a);
}
GraphDef graph_def;
root.graph()->ToGraphDef(&graph_def);
return graph_def;
}
TEST_F(XlaActivityListenerTest, Test) {
GraphDef graph_def = CreateGraphDef();
SessionOptions options;
options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_global_jit_level(OptimizerOptions::ON_2);
std::unique_ptr<Session> session(NewSession(options));
TF_ASSERT_OK(session->Create(graph_def));
std::vector<std::string> output_names = {std::string("add_4:0")};
Tensor tensor_2x2(DT_FLOAT, TensorShape({2, 2}));
for (int i = 0; i < 4; i++) {
tensor_2x2.matrix<float>()(i / 2, i % 2) = 5 * i;
}
Tensor tensor_3x3(DT_FLOAT, TensorShape({3, 3}));
for (int i = 0; i < 9; i++) {
tensor_3x3.matrix<float>()(i / 3, i % 3) = 5 * i;
}
std::vector<std::pair<string, Tensor>> inputs_2x2 = {{"A", tensor_2x2}};
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run(inputs_2x2, output_names,
{}, &outputs));
XlaAutoClusteringActivity expected_auto_clustering_activity;
protobuf::TextFormat::ParseFromString(
R"(global_jit_level: ON_2
cpu_global_jit_enabled: true
summary {
unclustered_node_count: 4
clustered_node_count: 14
clusters {
name: "cluster_0"
size: 14
op_histogram {
op: "Add"
count: 1
}
op_histogram {
op: "Const"
count: 4
}
op_histogram {
op: "MatMul"
count: 5
}
op_histogram {
op: "Mul"
count: 4
}
}
unclustered_op_histogram {
op: "NoOp"
count: 2
}
unclustered_op_histogram {
op: "_Arg"
count: 1
}
unclustered_op_histogram {
op: "_Retval"
count: 1
}
}
)",
&expected_auto_clustering_activity);
EXPECT_EQ(listener()->auto_clustering_activity().DebugString(),
expected_auto_clustering_activity.DebugString());
EXPECT_EQ(listener()->jit_compilation_activity().cluster_name(), "cluster_0");
EXPECT_EQ(listener()->jit_compilation_activity().compile_count(), 1);
int64_t first_compile_time =
listener()->jit_compilation_activity().compile_time_us();
EXPECT_GT(first_compile_time, 0);
EXPECT_EQ(listener()->jit_compilation_activity().cumulative_compile_time_us(),
first_compile_time);
std::vector<std::pair<string, Tensor>> inputs_3x3 = {{"A", tensor_3x3}};
outputs.clear();
for (int i = 0; i < 3; i++) {
TF_ASSERT_OK(session->Run(inputs_3x3, output_names,
{}, &outputs));
}
EXPECT_EQ(listener()->jit_compilation_activity().cluster_name(), "cluster_0");
EXPECT_EQ(listener()->jit_compilation_activity().compile_count(), 2);
EXPECT_GT(listener()->jit_compilation_activity().compile_time_us(), 0);
EXPECT_EQ(listener()->jit_compilation_activity().cumulative_compile_time_us(),
first_compile_time +
listener()->jit_compilation_activity().compile_time_us());
}
}
}
int main(int argc, char** argv) {
tensorflow::GetMarkForCompilationPassFlags()->tf_xla_cpu_global_jit = true;
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_activity_listener.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_activity_listener_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
effcb6f2-119c-4733-ac16-417d8c58d04c | cpp | tensorflow/tensorflow | tensor_map | tensorflow/core/kernels/tensor_map.cc | tensorflow/core/kernels/tensor_map_test.cc | #include "tensorflow/core/kernels/tensor_map.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/lib/core/coding.h"
namespace tensorflow {
TensorMap::~TensorMap() {
if (tensors_) tensors_->Unref();
}
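// Serializes the map as a flat tensor list in key, value, key, value, ...
// order; Decode() below consumes the list back in pairs.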
void TensorMap::Encode(VariantTensorData* data) const {
data->set_type_name(TypeName());
absl::flat_hash_map<TensorKey, Tensor>::const_iterator map_it =
tensors().begin();
while (map_it != tensors().end()) {
Tensor k = map_it->first;
Tensor v = map_it->second;
CHECK_NE(k.dtype(), DT_INVALID);
CHECK_NE(v.dtype(), DT_INVALID);
*data->add_tensors() = k;
*data->add_tensors() = v;
map_it++;
}
}
static Status TensorMapDeviceCopy(
const TensorMap& from, TensorMap* to,
const UnaryVariantOpRegistry::AsyncTensorDeviceCopyFn& copy) {
for (const std::pair<TensorKey, Tensor>& p : from.tensors()) {
TensorKey to_key(p.first.dtype());
Tensor to_val(p.second.dtype());
TF_RETURN_IF_ERROR(copy(p.first, &to_key));
TF_RETURN_IF_ERROR(copy(p.second, &to_val));
to->tensors().emplace(to_key, to_val);
}
return absl::OkStatus();
}
#define REGISTER_LIST_COPY(DIRECTION) \
INTERNAL_REGISTER_UNARY_VARIANT_DEVICE_COPY_FUNCTION(TensorMap, DIRECTION, \
TensorMapDeviceCopy)
REGISTER_LIST_COPY(VariantDeviceCopyDirection::HOST_TO_DEVICE);
REGISTER_LIST_COPY(VariantDeviceCopyDirection::DEVICE_TO_HOST);
REGISTER_LIST_COPY(VariantDeviceCopyDirection::DEVICE_TO_DEVICE);
REGISTER_UNARY_VARIANT_DECODE_FUNCTION(TensorMap, TensorMap::kTypeName);
bool TensorMap::Decode(const VariantTensorData& data) {
std::vector<Tensor>::const_iterator tensors_it = data.tensors().begin();
while (tensors_it != data.tensors().end()) {
if (std::next(tensors_it) == data.tensors().end()) {
return false;
}
tensors().emplace(tensors_it[0], tensors_it[1]);
tensors_it += 2;
}
return true;
}
const char TensorMap::kTypeName[] = "tensorflow::TensorMap";
} | #include "tensorflow/core/kernels/tensor_map.h"
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
TEST(TensorMapTest, Empty) {
TensorMap tm;
EXPECT_EQ(tm.tensors().size(), 0);
EXPECT_EQ(tm.tensors().begin(), tm.tensors().end());
}
TEST(TensorKeyTest, Equal) {
TensorKey k1 = Tensor(15);
TensorKey k2 = Tensor(15);
EXPECT_EQ(k1, k2);
EXPECT_EQ(k1.shape(), k2.shape());
EXPECT_EQ(k1.dtype(), k2.dtype());
TensorKey k3 = Tensor(37.0);
EXPECT_NE(k1, k3);
EXPECT_NE(k1.dtype(), k3.dtype());
}
TEST(TensorMapTest, Insert) {
TensorMap tm;
TensorKey k = Tensor(11);
Tensor v = Tensor(22);
tm.insert(k, v);
absl::flat_hash_map<TensorKey, Tensor> am;
am.try_emplace(k, v);
absl::flat_hash_map<TensorKey, Tensor>::iterator map_it =
tm.tensors().begin();
EXPECT_EQ(map_it->first, k);
test::ExpectTensorEqual<int32>(map_it->second, v);
map_it++;
EXPECT_EQ(map_it, tm.tensors().end());
}
TEST(TensorMapTest, Lookup) {
TensorMap tm;
TensorKey k = Tensor(11);
Tensor v = Tensor(22);
tm.insert(k, v);
absl::flat_hash_map<TensorKey, Tensor>::iterator map_it = tm.find(k);
Tensor f = map_it->second;
EXPECT_EQ(map_it->first, k);
test::ExpectTensorEqual<int32>(f, v);
}
TEST(TensorMapTest, Erase) {
TensorMap tm;
TensorKey k = Tensor(11);
Tensor v = Tensor(22);
tm.insert(k, v);
tm.erase(k);
EXPECT_EQ(tm.find(k), tm.tensors().end());
}
TEST(TensorMapTest, SameKeyInsert) {
TensorMap tm;
TensorKey k = Tensor(11);
Tensor v1 = Tensor(22);
Tensor v2 = Tensor(23);
bool b1 = tm.insert(k, v1);
bool b2 = tm.insert(k, v2);
EXPECT_EQ(b1, true);
EXPECT_EQ(b2, false);
absl::flat_hash_map<TensorKey, Tensor>::iterator map_it = tm.find(k);
EXPECT_EQ(map_it->first, k);
test::ExpectTensorEqual<int32>(map_it->second, v1);
}
TEST(TensorMapTest, Replace) {
TensorMap tm;
TensorKey k = Tensor(11);
Tensor v1 = Tensor(22);
Tensor v2 = Tensor(23);
tm.insert(k, v1);
tm[k] = v2;
absl::flat_hash_map<TensorKey, Tensor>::iterator map_it = tm.find(k);
EXPECT_EQ(map_it->first, k);
test::ExpectTensorEqual<int32>(map_it->second, v2);
}
TEST(TensorMapTest, ListKeys) {
TensorMap tm;
TensorKey k = Tensor(11.0);
TensorKey k2 = Tensor(12.0);
Tensor v = Tensor(22);
Tensor v2 = Tensor(23);
tm.insert(k, v);
tm.insert(k2, v2);
std::vector<Tensor> keys = tm.keys();
std::vector<std::pair<double, int>> key_doubles;
for (int i = 0; i < keys.size(); i++) {
double x = keys[i].scalar<double>()();
std::pair<double, int> p = std::pair<double, int>(x, i);
key_doubles.push_back(p);
}
sort(key_doubles.begin(), key_doubles.end());
EXPECT_EQ(keys.size(), 2);
EXPECT_EQ(key_doubles[0].first, 11.0);
EXPECT_EQ(key_doubles[1].first, 12.0);
int ind1 = key_doubles[0].second;
int ind2 = key_doubles[1].second;
EXPECT_EQ(keys[ind1].shape(), k.shape());
EXPECT_EQ(keys[ind2].shape(), k2.shape());
}
TEST(TensorMapTest, Size) {
TensorMap tm;
EXPECT_EQ(tm.size(), 0);
TensorKey k = Tensor(11);
Tensor v = Tensor(22);
tm.insert(k, v);
EXPECT_EQ(tm.size(), 1);
}
TEST(TensorMapTest, Copy) {
TensorMap tm;
TensorKey k = Tensor(11);
Tensor v = Tensor(22);
tm.insert(k, v);
TensorMap tmc = tm.Copy();
EXPECT_EQ(tm.size(), tmc.size());
EXPECT_NE(tm.find(k), tm.tensors().end());
EXPECT_NE(tmc.find(k), tmc.tensors().end());
EXPECT_EQ(tm.find(k)->first, tmc.find(k)->first);
test::ExpectTensorEqual<int32>(tm.find(k)->second, tmc.find(k)->second);
}
TEST(TensorMapTest, EncodeDecode) {
TensorMap tm;
TensorKey k = Tensor(11);
Tensor v = Tensor(22);
tm.insert(k, v);
VariantTensorData data;
tm.Encode(&data);
TensorMap tmc;
tmc.Decode(data);
EXPECT_EQ(tm.size(), tmc.size());
EXPECT_NE(tm.find(k), tm.tensors().end());
EXPECT_NE(tmc.find(k), tmc.tensors().end());
EXPECT_EQ(tm.find(k)->first, tmc.find(k)->first);
test::ExpectTensorEqual<int32>(tm.find(k)->second, tmc.find(k)->second);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/tensor_map.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/tensor_map_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
91af592b-fba7-4979-b8ef-f4946b94eef3 | cpp | tensorflow/tensorflow | threadpool_device | tensorflow/core/common_runtime/threadpool_device.cc | tensorflow/core/common_runtime/threadpool_device_test.cc | #if defined(ENABLE_ONEDNN_OPENMP) && defined(ENABLE_MKL) && defined(_OPENMP)
#ifndef DNNL_AARCH64_USE_ACL
#include "external/llvm_openmp/include/omp.h"
#define EIGEN_DONT_PARALLELIZE
#else
#include "omp.h"
#endif
#endif
#include "absl/base/call_once.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/common_runtime/local_device.h"
#include "tensorflow/core/common_runtime/scoped_allocator.h"
#include "tensorflow/core/common_runtime/scoped_allocator_mgr.h"
#include "tensorflow/core/common_runtime/threadpool_device.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/allocator_registry.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/types.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/port.h"
#include "tensorflow/core/util/util.h"
#ifdef INTEL_MKL
#include "tensorflow/core/common_runtime/mkl_cpu_allocator.h"
#include "tensorflow/core/platform/cpu_info.h"
#endif
namespace tensorflow {
ThreadPoolDevice::ThreadPoolDevice(const SessionOptions& options,
const string& name, Bytes memory_limit,
const DeviceLocality& locality,
Allocator* allocator)
: LocalDevice(options, Device::BuildDeviceAttributes(
name, DEVICE_CPU, memory_limit, locality)),
allocator_(allocator),
scoped_allocator_mgr_(new ScopedAllocatorMgr(name)) {
auto s = NodeFileWriter::GetNodeFileWriterIfEnabled(name, env());
if (!s.ok()) {
LOG(ERROR) << s.status();
} else {
node_file_writer_ = *s;
if (node_file_writer_) {
LOG(INFO) << "Writing NodeDefs to file: "
<< node_file_writer_->filename();
}
}
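// With oneDNN's OpenMP build, pick conservative defaults unless the user
// already set OMP_NUM_THREADS / KMP_BLOCKTIME: roughly one OpenMP thread
// per physical core, and a 1ms spin before idle workers go to sleep.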
#if defined(ENABLE_ONEDNN_OPENMP) && defined(INTEL_MKL)
if (!IsMKLEnabled()) return;
#ifdef _OPENMP
const char* user_omp_threads = getenv("OMP_NUM_THREADS");
static absl::once_flag num_threads_setting_flag;
if (user_omp_threads == nullptr) {
const int mkl_intra_op = port::NumSchedulableCPUs();
const int ht = port::NumHyperthreadsPerCore();
absl::call_once(num_threads_setting_flag, omp_set_num_threads,
(mkl_intra_op + ht - 1) / ht);
}
#ifndef DNNL_AARCH64_USE_ACL
const char* user_kmp_blocktime = getenv("KMP_BLOCKTIME");
static absl::once_flag blocktime_setting_flag;
if (user_kmp_blocktime == nullptr) {
absl::call_once(blocktime_setting_flag, kmp_set_blocktime, 1);
}
#endif
#endif
#endif
}
ThreadPoolDevice::~ThreadPoolDevice() {}
Allocator* ThreadPoolDevice::GetAllocator(AllocatorAttributes attr) {
return allocator_;
}
Allocator* ThreadPoolDevice::GetScopedAllocator(AllocatorAttributes attr,
int64_t step_id) {
if (attr.scope_id > 0) {
return scoped_allocator_mgr_->GetContainer(step_id)->GetInstance(
attr.scope_id);
}
LOG(FATAL) << "Unexpected call to ThreadPoolDevice::GetScopedAllocator "
<< "attr.scope_id = " << attr.scope_id;
return allocator_;
}
Status ThreadPoolDevice::MakeTensorFromProto(
const TensorProto& tensor_proto, const AllocatorAttributes alloc_attrs,
Tensor* tensor) {
if (tensor_proto.dtype() > 0 && tensor_proto.dtype() <= DataType_MAX) {
Tensor parsed(tensor_proto.dtype());
if (parsed.FromProto(allocator_, tensor_proto)) {
*tensor = std::move(parsed);
return absl::OkStatus();
}
}
return errors::InvalidArgument("Cannot parse tensor from proto: ",
tensor_proto.DebugString());
}
void ThreadPoolDevice::CopyTensorInSameDevice(
const Tensor* input_tensor, Tensor* output_tensor,
const DeviceContext* device_context, StatusCallback done) {
if (input_tensor->NumElements() != output_tensor->NumElements()) {
done(errors::Internal(
"CPU->CPU copy shape mismatch: input=", input_tensor->shape(),
", output=", output_tensor->shape()));
return;
}
tensor::DeepCopy(*input_tensor, output_tensor);
done(absl::OkStatus());
}
namespace {
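// TF_CPU_DEBUG_OPS_TO_LOG holds a comma-separated list of op type names
// whose inputs and outputs should be dumped to the log; it is parsed once
// into a set that is intentionally leaked.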
const absl::flat_hash_set<std::string>* GetOpsToLogFromEnv() {
auto* result = new absl::flat_hash_set<std::string>;
const char* env = getenv("TF_CPU_DEBUG_OPS_TO_LOG");
if (!env) {
return result;
}
std::vector<absl::string_view> ops = absl::StrSplit(env, ',');
LOG(INFO) << "Will log inputs & outputs from the following ops: ";
for (absl::string_view op : ops) {
result->insert(std::string(op));
LOG(INFO) << " |" << op << "|";
}
return result;
}
bool ShouldLogInputsAndOutputs(OpKernel* op_kernel) {
static const absl::flat_hash_set<std::string>& ops_to_log =
*GetOpsToLogFromEnv();
static const bool is_empty = ops_to_log.empty();
if (is_empty) {
return false;
}
return ops_to_log.count(op_kernel->type_string());
}
}
void ThreadPoolDevice::Compute(OpKernel* op_kernel, OpKernelContext* context) {
bool should_log_inputs_and_outputs = ShouldLogInputsAndOutputs(op_kernel);
if (should_log_inputs_and_outputs) {
LogInputs(op_kernel, context);
}
op_kernel->Compute(context);
if (context->status().ok() && node_file_writer_) {
Status s = node_file_writer_->RecordNodeExecution(op_kernel, context);
if (!s.ok()) {
LOG(ERROR) << s;
context->SetStatus(s);
}
}
if (should_log_inputs_and_outputs) {
LogOutputs(op_kernel, context);
}
}
void ThreadPoolDevice::ComputeAsync(AsyncOpKernel* op_kernel,
OpKernelContext* context,
AsyncOpKernel::DoneCallback done) {
bool should_log_inputs_and_outputs = ShouldLogInputsAndOutputs(op_kernel);
if (should_log_inputs_and_outputs) {
LogInputs(op_kernel, context);
AsyncOpKernel::DoneCallback parent_done = done;
done = [this, parent_done, op_kernel, context]() {
LogOutputs(op_kernel, context);
parent_done();
};
}
op_kernel->ComputeAsync(context, done);
}
void ThreadPoolDevice::LogInputs(OpKernel* op_kernel,
OpKernelContext* context) {
LOG(INFO) << "Inputs for " << op_kernel->name() << " (total "
<< context->num_inputs() << "):";
for (int i = 0; i < context->num_inputs(); i++) {
if (!context->has_input(i)) {
LOG(INFO) << "input # " << i << " is absent";
continue;
}
LOG(INFO) << "input # " << i;
LOG(INFO) << context->input(i).DebugString(-1);
}
LOG(INFO) << "";
}
void ThreadPoolDevice::LogOutputs(OpKernel* op_kernel,
OpKernelContext* context) {
if (!context->status().ok()) {
LOG(INFO) << op_kernel->name()
<< " failed: " << context->status().message();
return;
}
LOG(INFO) << "Outputs for " << op_kernel->name() << " (total "
<< context->num_inputs() << "):";
for (int i = 0; i < context->num_outputs(); i++) {
Tensor* output = context->mutable_output(i);
if (output == nullptr) {
LOG(INFO) << "output # " << i << " is null";
} else {
LOG(INFO) << "output # " << i;
LOG(INFO) << output->DebugString(-1);
}
}
LOG(INFO) << "";
}
#ifdef INTEL_MKL
namespace {
class MklCPUAllocatorFactory : public AllocatorFactory {
public:
bool NumaEnabled() override { return false; }
Allocator* CreateAllocator() override { return new MklCPUAllocator; }
virtual SubAllocator* CreateSubAllocator(int numa_node) {
return new MklSubAllocator;
}
};
REGISTER_MEM_ALLOCATOR("MklCPUAllocator",
((IsMKLEnabled() || IsZenDnnEnabled()) ? 200 : 50),
MklCPUAllocatorFactory);
}
#endif
} | #include "tensorflow/core/common_runtime/threadpool_device.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
const int kDimSize = 2;
void InitTensor(Tensor* tensor, float value) {
auto eigen_tensor = tensor->tensor<float, kDimSize>();
for (int i = 0; i < kDimSize; ++i) {
for (int j = 0; j < kDimSize; ++j) {
eigen_tensor(i, j) = value;
}
}
}
bool Equal(const Tensor& tensor1, const Tensor& tensor2) {
auto eigen_tensor1 = tensor1.tensor<float, kDimSize>();
auto eigen_tensor2 = tensor2.tensor<float, kDimSize>();
for (int i = 0; i < kDimSize; ++i) {
for (int j = 0; j < kDimSize; ++j) {
if (eigen_tensor1(i, j) != eigen_tensor2(i, j)) {
return false;
}
}
}
return true;
}
TEST(ThreadPoolDeviceTest, CopyTensor) {
Tensor input(DT_FLOAT, TensorShape({kDimSize, kDimSize}));
Tensor output(DT_FLOAT, TensorShape({kDimSize, kDimSize}));
InitTensor(&input, 1);
InitTensor(&output, 0);
ASSERT_FALSE(Equal(input, output));
ThreadPoolDevice device(SessionOptions(), "/device:CPU:0", Bytes(256),
DeviceLocality(), cpu_allocator());
DeviceContext* device_context = new DeviceContext;
Notification note;
device.CopyTensorInSameDevice(&input, &output, device_context,
[¬e](const Status& s) {
TF_ASSERT_OK(s);
note.Notify();
});
note.WaitForNotification();
ASSERT_TRUE(Equal(input, output));
device_context->Unref();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/threadpool_device.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/threadpool_device_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ee76d99d-3019-456e-84c1-282e1460ac56 | cpp | tensorflow/tensorflow | remove_attribute | tensorflow/tools/graph_transforms/remove_attribute.cc | tensorflow/tools/graph_transforms/remove_attribute_test.cc | #include "tensorflow/core/common_runtime/constant_folding.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/fold_constants_lib.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
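// Graph transform that strips a named attribute from every node, or, when
// an op_name other than "*" is supplied, only from nodes of that op type.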
Status RemoveAttribute(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
if (!context.params.count("attribute_name") ||
(context.params.at("attribute_name").size() != 1)) {
return errors::InvalidArgument(
"remove_attribute expects exactly one 'attribute_name' "
"argument, e.g. remove_attribute(op_name=Mul, attribute_name=foo)");
}
string op_name;
if (context.params.count("op_name")) {
if (context.params.at("op_name").size() != 1) {
return errors::InvalidArgument(
"remove_attribute expects a single op_name argument, but found ",
context.params.at("op_name").size());
}
op_name = context.params.at("op_name")[0];
} else {
op_name = "*";
}
const string attribute_name = context.params.at("attribute_name")[0];
output_graph_def->Clear();
for (const NodeDef& node : input_graph_def.node()) {
NodeDef* new_node = output_graph_def->mutable_node()->Add();
*new_node = node;
if (((op_name == "*") || (op_name == node.op())) &&
(node.attr().count(attribute_name))) {
new_node->mutable_attr()->erase(attribute_name);
}
}
return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("remove_attribute", RemoveAttribute);
}
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status RemoveAttribute(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
class RemoveAttributeTest : public ::testing::Test {
protected:
void TestRemoveAttribute() {
GraphDef graph_def;
NodeDef* mul_node1 = graph_def.add_node();
mul_node1->set_name("mul_node1");
mul_node1->set_op("Mul");
mul_node1->add_input("add_node2");
mul_node1->add_input("add_node3");
SetNodeAttr<int32>("foo", 23, mul_node1);
SetNodeAttr<string>("bar", "something", mul_node1);
NodeDef* add_node2 = graph_def.add_node();
add_node2->set_name("add_node2");
add_node2->set_op("Add");
add_node2->add_input("const_node1");
add_node2->add_input("const_node2");
SetNodeAttr<int32>("foo", 46, add_node2);
SetNodeAttr<int32>("bob", 23, add_node2);
SetNodeAttr<string>("bar", "something else", add_node2);
NodeDef* add_node3 = graph_def.add_node();
add_node3->set_name("add_node3");
add_node3->set_op("Add");
add_node3->add_input("const_node1");
add_node3->add_input("const_node3");
NodeDef* const_node1 = graph_def.add_node();
const_node1->set_name("const_node1");
const_node1->set_op("Const");
NodeDef* const_node2 = graph_def.add_node();
const_node2->set_name("const_node2");
const_node2->set_op("Const");
NodeDef* const_node3 = graph_def.add_node();
const_node3->set_name("const_node3");
const_node3->set_op("Const");
NodeDef* add_node4 = graph_def.add_node();
add_node4->set_name("add_node4");
add_node4->set_op("Add");
add_node4->add_input("add_node2");
add_node4->add_input("add_node3");
GraphDef wildcard_result;
TransformFuncContext context;
context.input_names = {};
context.output_names = {"mul_node1"};
context.params.insert(
std::pair<string, std::vector<string>>({"op_name", {string("*")}}));
context.params.insert(std::pair<string, std::vector<string>>(
{"attribute_name", {string("foo")}}));
TF_ASSERT_OK(RemoveAttribute(graph_def, context, &wildcard_result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(wildcard_result, &node_lookup);
EXPECT_EQ(0, node_lookup.at("mul_node1")->attr().count("foo"));
EXPECT_EQ(1, node_lookup.at("mul_node1")->attr().count("bar"));
EXPECT_EQ(0, node_lookup.at("add_node2")->attr().count("foo"));
EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("bar"));
EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("bob"));
GraphDef targeted_result;
TransformFuncContext targeted_context;
targeted_context.input_names = {};
targeted_context.output_names = {"mul_node1"};
targeted_context.params.insert(
std::pair<string, std::vector<string>>({"op_name", {string("Mul")}}));
targeted_context.params.insert(std::pair<string, std::vector<string>>(
{"attribute_name", {string("foo")}}));
TF_ASSERT_OK(
RemoveAttribute(graph_def, targeted_context, &targeted_result));
MapNamesToNodes(targeted_result, &node_lookup);
EXPECT_EQ(0, node_lookup.at("mul_node1")->attr().count("foo"));
EXPECT_EQ(1, node_lookup.at("mul_node1")->attr().count("bar"));
EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("foo"));
EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("bar"));
EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("bob"));
}
};
TEST_F(RemoveAttributeTest, TestRemoveAttribute) { TestRemoveAttribute(); }
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/remove_attribute.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/remove_attribute_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9a2dfbbc-53b8-4988-9ab6-73c85b8aa912 | cpp | tensorflow/tensorflow | cudnn_support_utils | third_party/xla/xla/service/gpu/cudnn_support_utils.cc | third_party/xla/xla/service/gpu/cudnn_support_utils_test.cc | #include "xla/service/gpu/cudnn_support_utils.h"
#include <cstdint>
#include <vector>
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
absl::StatusOr<bool> CudnnSupportsOptimizedIntegerConvolution(
const se::CudaComputeCapability& compute_capability,
HloCustomCallInstruction& conv, int vector_size) {
TF_ASSIGN_OR_RETURN(auto kind, GetCudnnConvKind(&conv));
const Shape& input_shape = conv.operand(0)->shape();
const Shape& kernel_shape = conv.operand(1)->shape();
const Shape& result_shape = conv.shape().tuple_shapes(0);
const auto& dnums = conv.convolution_dimension_numbers();
if (vector_size != 4 && vector_size != 32) {
VLOG(3) << "Unsupported vector size for integer convolution: "
<< vector_size;
return false;
}
if ((vector_size == 32 && !compute_capability.IsAtLeast(7, 5)) ||
!compute_capability.IsAtLeast(6, 1)) {
VLOG(3) << "Compute capability " << compute_capability.ToString()
<< " is not sufficent for int8x" << vector_size
<< " vectorization.";
return false;
}
if (kind != CudnnConvKind::kForward &&
kind != CudnnConvKind::kForwardActivation) {
VLOG(3) << "Convolution kind is not forward or foward-activation: "
<< conv.ToString();
return false;
}
if (!primitive_util::IsIntegralType(input_shape.element_type()) ||
!primitive_util::IsIntegralType(kernel_shape.element_type())) {
VLOG(3) << "Convolution does not accept integer inputs/weights: "
<< conv.ToString();
return false;
}
if (dnums.input_spatial_dimensions().size() != 2 ||
dnums.kernel_spatial_dimensions().size() != 2 ||
dnums.output_spatial_dimensions().size() != 2) {
VLOG(3) << "Convolution is not 2D: " << conv.ToString();
return false;
}
if (vector_size == 32 &&
!primitive_util::IsIntegralType(result_shape.element_type())) {
VLOG(3) << "int8x32 convolutions only support integer output: "
<< conv.ToString();
return false;
}
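// int8x32 kernels appear to require that the dilated filter not cover the
// entire input in either spatial dimension; reject such shapes up front.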
if (vector_size == 32) {
int64_t W = input_shape.dimensions(dnums.input_spatial_dimensions()[0]);
int64_t H = input_shape.dimensions(dnums.input_spatial_dimensions()[1]);
int64_t R = kernel_shape.dimensions(dnums.kernel_spatial_dimensions()[0]);
int64_t S = kernel_shape.dimensions(dnums.kernel_spatial_dimensions()[1]);
const int64_t dilationW = conv.window().dimensions()[0].base_dilation();
const int64_t dilationH = conv.window().dimensions()[1].base_dilation();
if ((W <= (R - 1) * dilationW) || (H <= (S - 1) * dilationH)) {
VLOG(3) << "Conv spatial filter/input dimensions are too small for "
"vecotrized int8x32 convolution: "
<< conv.ToString();
return false;
}
}
if (window_util::HasDilation(conv.window())) {
VLOG(3) << "Vectorized integer convolutions do not support dilation: "
<< conv.ToString();
return false;
}
return true;
}
absl::StatusOr<CudnnReorderTransposeConfig>
CudnnInferTransposeForFilterReordering(
const Shape& shape, const ConvolutionDimensionNumbers& dimension_numbers) {
if (shape.rank() != 4 && shape.rank() != 5) {
return Internal("Filter shape has unexpected rank.");
}
const int64_t dO = dimension_numbers.kernel_output_feature_dimension();
const int64_t dI = dimension_numbers.kernel_input_feature_dimension();
const int64_t dH = dimension_numbers.kernel_spatial_dimensions().at(0);
const int64_t dW = dimension_numbers.kernel_spatial_dimensions().at(1);
bool revectorize = shape.rank() == 5;
const int64_t dZ = revectorize ? 10 - dO - dI - dH - dW : -1;
const int64_t vsize = revectorize ? shape.dimensions(dZ) : 1;
if (shape.dimensions(dO) % 32 != 0 ||
shape.dimensions(dI) % (32 / vsize) != 0 ||
(revectorize && vsize != 4 && vsize != 32)) {
return Internal("Filter shape is not vectorizable.");
}
std::vector<int64_t> output = {
shape.dimensions(dO), shape.dimensions(dI) / (32 / vsize),
shape.dimensions(dH), shape.dimensions(dW), 32};
Shape output_shape = ShapeUtil::MakeShape(shape.element_type(), output);
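// Computes where an original logical dimension lands in the 8-D split
// shape built below, after O is split into (O/8, 4, 2) and the 32-wide
// input-feature vector into (8, 4) -- carved out of dI, or reusing an
// existing vector dimension dZ, depending on vsize.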
auto calc_index = [&](int dim) {
bool split_v = vsize == 32;
return (revectorize
? (dI < dim ? 2 - split_v : 0) + (dZ < dim ? 1 + split_v : 0)
: (dI < dim ? 3 : 0)) +
(dO < dim ? 3 : 0) + (dH < dim) + (dW < dim);
};
int idx_O = calc_index(dO);
int idx_I = calc_index(dI);
int idx_H = calc_index(dH);
int idx_W = calc_index(dW);
int idx_Y = vsize == 32 ? calc_index(dZ) : idx_I + 1;
int idx_Z = vsize == 4 ? calc_index(dZ) : vsize == 32 ? idx_Y + 1 : idx_I + 2;
std::vector<int64_t> dims(8);
dims[idx_O] = shape.dimensions(dO) / 8;
dims[idx_O + 1] = 4;
dims[idx_O + 2] = 2;
dims[idx_I] = shape.dimensions(dI) / (32 / vsize);
dims[idx_Y] = 8;
dims[idx_Z] = 4;
dims[idx_H] = shape.dimensions(dH);
dims[idx_W] = shape.dimensions(dW);
Shape split_shape = ShapeUtil::MakeShape(shape.element_type(), dims);
std::vector<int64_t> permutation = {idx_I, idx_H, idx_W, idx_O,
idx_O + 2, idx_Y, idx_O + 1, idx_Z};
return CudnnReorderTransposeConfig{split_shape, output_shape, permutation};
}
absl::StatusOr<CudnnReorderTransposeConfig>
CudnnInferTransposeForBiasReordering(const Shape& shape) {
if (shape.rank() != 1) {
return Internal("Bias shape has unexpected rank.");
}
if (shape.dimensions(0) % 32 != 0) {
return Internal("Bias shape is not vectorizable.");
}
std::vector<int64_t> dims = {shape.dimensions(0) / 32, 4, 2, 4};
Shape split_shape = ShapeUtil::MakeShape(shape.element_type(), dims);
std::vector<int64_t> permutation = {0, 2, 1, 3};
return CudnnReorderTransposeConfig{split_shape, shape, permutation};
}
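// Recognizes the root tuple produced when a zero-operand workspace
// allocation custom call is returned alongside the actual result.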
bool IsWorkspaceAllocationRoot(const HloInstruction& root) {
return root.IsRoot() && root.opcode() == HloOpcode::kTuple &&
root.operand_count() == 2 &&
root.operand(1)->IsCustomCall(kWorkspaceAllocationCustomCallTarget) &&
root.operand(1)->operand_count() == 0;
}
}
} | #include "xla/service/gpu/cudnn_support_utils.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::tsl::testing::IsOkAndHolds;
class CudnnSupportUtilsTest : public HloTestBase {
public:
absl::StatusOr<HloCustomCallInstruction*> GetCustomCall(
xla::VerifiedHloModule* module, absl::string_view target) {
HloCustomCallInstruction* call = nullptr;
for (HloComputation* comp : module->MakeNonfusionComputations()) {
for (HloInstruction* inst : comp->instructions()) {
if (inst->IsCustomCall(target)) {
VLOG(1) << inst->ToString();
if (call != nullptr) {
return tsl::errors::FailedPrecondition(
"Found more than one custom call.");
}
call = Cast<HloCustomCallInstruction>(inst);
}
}
}
if (call == nullptr) {
return tsl::errors::FailedPrecondition(
"Did not find any matching custom call.");
}
return call;
}
};
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedIntegerConvolutionCheckVectorSize) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[8,10,10,128] parameter(0)
filter = s8[2,2,128,128] parameter(1)
ROOT result = (s8[8,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(module.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 7),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 1),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedIntegerConvolutionCheckComputeCapability) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[8,10,10,128] parameter(0)
filter = s8[2,2,128,128] parameter(1)
ROOT result = (s8[8,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(module.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({6, 0}, *conv, 4),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({6, 1}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 4}, *conv, 32),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedIntegerConvolutionCheckKind) {
auto moduleFwd = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (s8[32,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleFwd.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
auto moduleBwdFilter = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f16[10,20,30,41] parameter(0)
output = f16[10,20,30,40] parameter(1)
result = (f16[2,2,41,40], u8[0]) custom-call(input, output),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBackwardFilter"
ROOT gte = f16[2,2,41,40] get-tuple-element(result), index=0
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleBwdFilter.get(), "__cudnn$convBackwardFilter"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
auto moduleBwdInput = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
output = f16[10,20,30,40] parameter(0)
filter = f16[2,2,41,40] parameter(1)
result = (f16[10,20,30,41], u8[0]) custom-call(output, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBackwardInput"
ROOT gte = f16[10,20,30,41] get-tuple-element(result), index=0
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleBwdInput.get(), "__cudnn$convBackwardInput"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckTypes) {
auto moduleS8InOut = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (s8[32,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleS8InOut.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
auto moduleS8InF32Out = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (f32[32,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleS8InF32Out.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
auto moduleF32InF32Out = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f32[32,10,10,64] parameter(0)
filter = f32[2,2,64,128] parameter(1)
ROOT result = (f32[32,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleF32InF32Out.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckDims) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,10,64] parameter(0)
filter = s8[2,2,2,64,128] parameter(1)
ROOT result = (s8[32,10,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b012f_012io->b012f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(module.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckDilation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (s8[32,20,20,128], u8[0]) custom-call(input, filter),
window={size=2x2 rhs_dilate=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(module.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckAlgo1Dims) {
auto moduleFilterCoversInput = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,2,2,64] parameter(0)
filter = s8[3,3,64,128] parameter(1)
ROOT result = (s8[32,2,2,128], u8[0]) custom-call(input, filter),
window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv, GetCustomCall(moduleFilterCoversInput.get(),
"__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
auto moduleFilterAlmostCoversInput = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,3,3,64] parameter(0)
filter = s8[3,3,64,128] parameter(1)
ROOT result = (s8[32,3,3,128], u8[0]) custom-call(input, filter),
window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(moduleFilterAlmostCoversInput.get(),
"__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
}
class ReorderFilterRank4Test : public ::testing::TestWithParam<std::string> {};
TEST_P(ReorderFilterRank4Test, InferTransposeRank4) {
auto input_dims = GetParam();
size_t dI = input_dims.find('i');
size_t dO = input_dims.find('o');
size_t dH = input_dims.find('0');
size_t dW = input_dims.find('1');
ConvolutionDimensionNumbers dnums;
dnums.set_kernel_input_feature_dimension(dI);
dnums.set_kernel_output_feature_dimension(dO);
dnums.add_kernel_spatial_dimensions(dH);
dnums.add_kernel_spatial_dimensions(dW);
int64_t shape_dims[4] = {0, 0, 0, 0};
shape_dims[dI] = 224;
shape_dims[dO] = 96;
shape_dims[dH] = 5;
shape_dims[dW] = 3;
Shape shape = ShapeUtil::MakeShape(U8, absl::MakeSpan(shape_dims));
auto input = HloInstruction::CreateParameter(0, shape, "input");
auto filter = HloInstruction::CreateParameter(1, shape, "filter");
TF_ASSERT_OK_AND_ASSIGN(CudnnReorderTransposeConfig inferred_config,
CudnnInferTransposeForFilterReordering(shape, dnums));
EXPECT_THAT(inferred_config.result_shape.dimensions(),
::testing::ElementsAre(96, 7, 5, 3, 32));
Shape reshaped = ShapeUtil::PermuteDimensions(
inferred_config.permutation, inferred_config.transpose_shape);
EXPECT_THAT(reshaped.dimensions(),
::testing::ElementsAre(7, 5, 3, 12, 2, 8, 4, 4));
EXPECT_EQ(inferred_config.permutation[6], inferred_config.permutation[4] - 1);
EXPECT_EQ(inferred_config.permutation[7], inferred_config.permutation[5] + 1);
}
std::vector<std::string> GeneratePermutations(std::string input_dims) {
std::sort(input_dims.begin(), input_dims.end());
std::vector<std::string> permutations;
do {
permutations.push_back(input_dims);
} while (std::next_permutation(input_dims.begin(), input_dims.end()));
return permutations;
}
INSTANTIATE_TEST_SUITE_P(ReorderTestSuite, ReorderFilterRank4Test,
::testing::ValuesIn(GeneratePermutations("01io")));
class ReorderFilterRank5Test
: public ::testing::TestWithParam<std::tuple<std::string, int>> {};
TEST_P(ReorderFilterRank5Test, InferTransposeRank5) {
auto [input_dims, vsize] = GetParam();
size_t dI = input_dims.find('i');
size_t dO = input_dims.find('o');
size_t dH = input_dims.find('0');
size_t dW = input_dims.find('1');
ConvolutionDimensionNumbers dnums;
dnums.set_kernel_input_feature_dimension(dI);
dnums.set_kernel_output_feature_dimension(dO);
dnums.add_kernel_spatial_dimensions(dH);
dnums.add_kernel_spatial_dimensions(dW);
int64_t shape_dims[5] = {vsize, vsize, vsize, vsize, vsize};
shape_dims[dI] = 224 / vsize;
shape_dims[dO] = 96;
shape_dims[dH] = 5;
shape_dims[dW] = 3;
Shape shape = ShapeUtil::MakeShape(U8, absl::MakeSpan(shape_dims));
auto input = HloInstruction::CreateParameter(0, shape, "input");
auto filter = HloInstruction::CreateParameter(1, shape, "filter");
TF_ASSERT_OK_AND_ASSIGN(CudnnReorderTransposeConfig inferred_config,
CudnnInferTransposeForFilterReordering(shape, dnums));
EXPECT_THAT(inferred_config.result_shape.dimensions(),
::testing::ElementsAre(96, 7, 5, 3, 32));
Shape reshaped = ShapeUtil::PermuteDimensions(
inferred_config.permutation, inferred_config.transpose_shape);
EXPECT_THAT(reshaped.dimensions(),
::testing::ElementsAre(7, 5, 3, 12, 2, 8, 4, 4));
EXPECT_EQ(inferred_config.permutation[6], inferred_config.permutation[4] - 1);
}
INSTANTIATE_TEST_SUITE_P(
ReorderTestSuite, ReorderFilterRank5Test,
::testing::Combine(::testing::ValuesIn(GeneratePermutations("01?io")),
::testing::Values(4, 32)));
class ReorderBiasTest : public ::testing::Test {};
TEST_F(ReorderBiasTest, InferTranspose) {
Shape shape = ShapeUtil::MakeShape(U8, {96});
auto bias = HloInstruction::CreateParameter(2, shape, "bias");
Shape unused = ShapeUtil::MakeNil();
auto input = HloInstruction::CreateParameter(0, unused, "input");
auto filter = HloInstruction::CreateParameter(1, unused, "filter");
TF_ASSERT_OK_AND_ASSIGN(CudnnReorderTransposeConfig inferred_config,
CudnnInferTransposeForBiasReordering(shape));
Shape reshaped = ShapeUtil::PermuteDimensions(
inferred_config.permutation, inferred_config.transpose_shape);
EXPECT_THAT(reshaped.dimensions(), ::testing::ElementsAre(3, 2, 4, 4));
EXPECT_EQ(inferred_config.permutation[2], 1);
EXPECT_EQ(inferred_config.permutation[3], 3);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/cudnn_support_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/cudnn_support_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
35322236-752a-4ec4-8d6f-3ed654be6fab | cpp | google/tsl | scoped_annotation | tsl/profiler/lib/scoped_annotation.h | tsl/profiler/lib/scoped_annotation_test.cc | #ifndef TENSORFLOW_TSL_PROFILER_LIB_SCOPED_ANNOTATION_H_
#define TENSORFLOW_TSL_PROFILER_LIB_SCOPED_ANNOTATION_H_
#include <stddef.h>
#include <atomic>
#include <string>
#include <string_view>
#include <utility>
#include "tsl/platform/macros.h"
#include "tsl/platform/platform.h"
#include "tsl/profiler/lib/nvtx_utils.h"
#if !defined(IS_MOBILE_PLATFORM)
#include "xla/tsl/profiler/backends/cpu/annotation_stack.h"
#endif
namespace tsl::profiler {
template <typename T>
void PushAnnotation(const T& generator) {
if (auto domain = DefaultProfilerDomain();
TF_PREDICT_FALSE(domain != nullptr)) {
RangePush(domain, generator());
return;
}
#if !defined(IS_MOBILE_PLATFORM)
if (TF_PREDICT_FALSE(AnnotationStack::IsEnabled())) {
AnnotationStack::PushAnnotation(static_cast<std::string_view>(generator()));
}
#endif
}
inline void PushAnnotation(const char* name) {
PushAnnotation([&] { return name; });
}
inline void PushAnnotation(const std::string& name) {
PushAnnotation([&] { return name; });
}
inline void PopAnnotation() {
std::atomic_thread_fence(std::memory_order_acquire);
if (auto domain = DefaultProfilerDomain();
TF_PREDICT_FALSE(domain != nullptr)) {
RangePop(domain);
return;
}
#if !defined(IS_MOBILE_PLATFORM)
if (TF_PREDICT_FALSE(AnnotationStack::IsEnabled())) {
AnnotationStack::PopAnnotation();
}
#endif
}
class ScopedAnnotation {
public:
template <typename T>
explicit ScopedAnnotation(T&& annotation) {
PushAnnotation(std::forward<T>(annotation));
}
~ScopedAnnotation() { PopAnnotation(); }
static bool IsEnabled() {
#if !defined(IS_MOBILE_PLATFORM)
return AnnotationStack::IsEnabled();
#else
return false;
#endif
}
private:
ScopedAnnotation(const ScopedAnnotation&) = delete;
ScopedAnnotation& operator=(const ScopedAnnotation&) = delete;
};
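// Example usage (a minimal sketch distilled from the accompanying unit
// tests; `i` is a stand-in loop variable):
//
//   AnnotationStack::Enable(true);
//   {
//     ScopedAnnotation outer("foo");
//     ScopedAnnotation inner("bar");  // annotation stack reads "foo::bar"
//   }  // both annotations are popped here, in reverse order
//
// Passing a lambda defers string construction until a profiler is actually
// listening, which keeps the disabled path cheap:
//
//   ScopedAnnotation trace([&] { return absl::StrCat(i, "-", i * i); });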
}
#endif | #include "tsl/profiler/lib/scoped_annotation.h"
#include <string>
#include "absl/strings/str_cat.h"
#include "xla/tsl/profiler/backends/cpu/annotation_stack.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
namespace profiler {
namespace {
TEST(ScopedAnnotation, Simple) {
{
ScopedAnnotation trace("blah");
EXPECT_EQ(AnnotationStack::Get(), "");
}
{
AnnotationStack::Enable(true);
ScopedAnnotation trace("blah");
EXPECT_EQ(AnnotationStack::Get(), "blah");
AnnotationStack::Enable(false);
}
{
AnnotationStack::Enable(true);
ScopedAnnotation outer("foo");
ScopedAnnotation inner("bar");
EXPECT_EQ(AnnotationStack::Get(), "foo::bar");
AnnotationStack::Enable(false);
}
{
AnnotationStack::Enable(true);
PushAnnotation("foo");
PushAnnotation("bar");
EXPECT_EQ(AnnotationStack::Get(), "foo::bar");
PopAnnotation();
PopAnnotation();
AnnotationStack::Enable(false);
}
EXPECT_EQ(AnnotationStack::Get(), "");
}
std::string GenerateRandomString(int length) {
  // Note: despite its name, this returns a fixed string of 'a's, so the
  // benchmarked annotation content is deterministic rather than random.
  return std::string(length, 'a');
}
void BM_ScopedAnnotationDisabled(::testing::benchmark::State& state) {
const int annotation_size = state.range(0);
std::string annotation = GenerateRandomString(annotation_size);
for (auto s : state) {
ScopedAnnotation trace(annotation);
}
}
BENCHMARK(BM_ScopedAnnotationDisabled)->Arg(8)->Arg(32)->Arg(128);
void BM_ScopedAnnotationEnabled(::testing::benchmark::State& state) {
const int annotation_size = state.range(0);
std::string annotation = GenerateRandomString(annotation_size);
AnnotationStack::Enable(true);
for (auto s : state) {
ScopedAnnotation trace(annotation);
}
AnnotationStack::Enable(false);
}
BENCHMARK(BM_ScopedAnnotationEnabled)->Arg(8)->Arg(32)->Arg(128);
void BM_ScopedAnnotationEnabled_Nested(::testing::benchmark::State& state) {
const int annotation_size = state.range(0);
std::string annotation = GenerateRandomString(annotation_size);
AnnotationStack::Enable(true);
for (auto s : state) {
ScopedAnnotation trace(annotation);
{ ScopedAnnotation trace(annotation); }
}
AnnotationStack::Enable(false);
}
BENCHMARK(BM_ScopedAnnotationEnabled_Nested)->Arg(8)->Arg(32)->Arg(128);
void BM_ScopedAnnotationEnabled_Adhoc(::testing::benchmark::State& state) {
AnnotationStack::Enable(true);
int i = 0;
for (auto s : state) {
ScopedAnnotation trace(absl::StrCat(i, "-", i * i));
++i;
}
AnnotationStack::Enable(false);
}
BENCHMARK(BM_ScopedAnnotationEnabled_Adhoc);
void BM_ScopedAnnotationDisabled_Lambda(::testing::benchmark::State& state) {
int i = 0;
for (auto s : state) {
ScopedAnnotation trace([&]() { return absl::StrCat(i, "-", i * i); });
++i;
}
}
BENCHMARK(BM_ScopedAnnotationDisabled_Lambda);
void BM_ScopedAnnotationEnabled_Adhoc_Lambda(
::testing::benchmark::State& state) {
AnnotationStack::Enable(true);
int i = 0;
for (auto s : state) {
ScopedAnnotation trace([&]() { return absl::StrCat(i, "-", i * i); });
++i;
}
AnnotationStack::Enable(false);
}
BENCHMARK(BM_ScopedAnnotationEnabled_Adhoc_Lambda);
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/profiler/lib/scoped_annotation.h | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/profiler/lib/scoped_annotation_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
df3a25ed-d89e-4a8e-9675-4e294c559107 | cpp | tensorflow/tensorflow | shape_partition | third_party/xla/xla/service/cpu/shape_partition.cc | third_party/xla/xla/service/cpu/shape_partition_test.cc | #include "xla/service/cpu/shape_partition.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <utility>
#include <vector>
namespace xla {
namespace cpu {
std::vector<int64_t> ShapePartitionAssigner::Run(
int64_t target_partition_count) {
std::vector<int64_t> outer_dims;
int64_t outer_dim_size = 1;
for (int i = shape_.layout().minor_to_major_size() - 1; i >= 0; --i) {
const int64_t dimension = shape_.layout().minor_to_major(i);
outer_dims.push_back(dimension);
outer_dim_size *= shape_.dimensions(dimension);
if (outer_dim_size >= target_partition_count) {
break;
}
}
target_partition_count = std::min(outer_dim_size, target_partition_count);
const int64_t target_dim_partition_count = std::pow(
static_cast<double>(target_partition_count), 1.0 / outer_dims.size());
std::vector<int64_t> dimension_partition_counts(outer_dims.size());
for (int64_t i = 0; i < outer_dims.size(); ++i) {
dimension_partition_counts[i] =
std::min(static_cast<int64_t>(shape_.dimensions(outer_dims[i])),
target_dim_partition_count);
}
if (GetTotalPartitionCount(dimension_partition_counts) <
target_partition_count) {
for (int64_t i = 0; i < dimension_partition_counts.size(); ++i) {
const int64_t current_dim_partition_count = dimension_partition_counts[i];
const int64_t other_dims_partition_count =
GetTotalPartitionCount(dimension_partition_counts) /
current_dim_partition_count;
int64_t additional_partition_count =
target_partition_count / other_dims_partition_count -
current_dim_partition_count;
additional_partition_count = std::min(
shape_.dimensions(outer_dims[i]) - dimension_partition_counts[i],
additional_partition_count);
if (additional_partition_count > 0) {
dimension_partition_counts[i] += additional_partition_count;
}
}
}
return dimension_partition_counts;
}
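// Worked example for Run() above (mirrors Shape53WithLayout10 in the unit
// test): for a [5,3] shape with layout {1,0} and target_partition_count = 6,
// outer_dims becomes {0, 1}, the per-dimension target pow(6, 1/2) truncates
// to 2, and the top-up loop grows dimension 0 from 2 to 3 partitions,
// returning {3, 2} -- 6 partitions in total.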
int64_t ShapePartitionAssigner::GetTotalPartitionCount(
const std::vector<int64_t>& dimension_partition_counts) {
int64_t total_partition_count = 1;
for (int64_t dim_partition_count : dimension_partition_counts) {
total_partition_count *= dim_partition_count;
}
return total_partition_count;
}
ShapePartitionIterator::ShapePartitionIterator(
const Shape& shape, absl::Span<const int64_t> dimension_partition_counts)
: shape_(shape),
dimension_partition_counts_(dimension_partition_counts.begin(),
dimension_partition_counts.end()),
dimensions_(dimension_partition_counts_.size()),
dimension_partition_sizes_(dimension_partition_counts_.size()),
dimension_partition_strides_(dimension_partition_counts_.size()) {
for (int i = 0; i < dimensions_.size(); ++i) {
dimensions_[i] = shape_.layout().minor_to_major(
shape_.layout().minor_to_major_size() - 1 - i);
}
for (int i = 0; i < dimension_partition_sizes_.size(); ++i) {
const int64_t dim_size = shape_.dimensions(dimensions_[i]);
dimension_partition_sizes_[i] =
std::max(int64_t{1}, dim_size / dimension_partition_counts_[i]);
}
dimension_partition_strides_[dimension_partition_strides_.size() - 1] = 1;
for (int i = dimension_partition_strides_.size() - 2; i >= 0; --i) {
dimension_partition_strides_[i] = dimension_partition_strides_[i + 1] *
dimension_partition_counts_[i + 1];
}
}
std::vector<std::pair<int64_t, int64_t>> ShapePartitionIterator::GetPartition(
int64_t index) const {
std::vector<std::pair<int64_t, int64_t>> partition(dimensions_.size());
for (int64_t i = 0; i < partition.size(); ++i) {
const int64_t partition_index = index / dimension_partition_strides_[i];
partition[i].first = partition_index * dimension_partition_sizes_[i];
if (partition_index == dimension_partition_counts_[i] - 1) {
partition[i].second =
shape_.dimensions(dimensions_[i]) - partition[i].first;
} else {
partition[i].second = dimension_partition_sizes_[i];
}
CHECK_GT(partition[i].second, 0);
index -= partition_index * dimension_partition_strides_[i];
}
return partition;
}
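// Worked example for GetPartition() above (mirrors Shape53WithLayout10 in
// the unit test): for a [5,3] shape with layout {1,0} and
// dimension_partition_counts = {3}, the per-partition size along dimension 0
// is max(1, 5/3) = 1, so the three partitions are {0,1}, {1,1}, and {2,3} --
// the last partition absorbs the remainder.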
int64_t ShapePartitionIterator::GetTotalPartitionCount() const {
return ShapePartitionAssigner::GetTotalPartitionCount(
dimension_partition_counts_);
}
}
} | #include "xla/service/cpu/shape_partition.h"
#include <algorithm>
#include <random>
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
namespace xla {
namespace cpu {
namespace {
class ShapePartitionAssignerTest : public HloTestBase {
protected:
typedef std::vector<int64_t> Vec;
void RunR2Test(const Shape& shape, int64_t max_target_partition_count,
const std::vector<int64_t>* expected_partitions) {
ShapePartitionAssigner assigner(shape);
for (int64_t i = 1; i <= max_target_partition_count; ++i) {
      std::vector<int64_t> actual_partitions =
          assigner.Run(/*target_partition_count=*/i);
EXPECT_THAT(actual_partitions, expected_partitions[i - 1]);
}
}
};
TEST_F(ShapePartitionAssignerTest, Shape13WithLayout10) {
  std::vector<int64_t> expected_partitions[] = {{1}, {1, 2}};
RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 3}, {1, 0}), 2,
expected_partitions);
}
TEST_F(ShapePartitionAssignerTest, Shape31WithLayout01) {
  std::vector<int64_t> expected_partitions[] = {{1}, {1, 2}};
RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 1}, {0, 1}), 2,
expected_partitions);
}
TEST_F(ShapePartitionAssignerTest, Shape53WithLayout10) {
  std::vector<int64_t> expected_partitions[] = {{1}, {2}, {3},
                                                {4}, {5}, {3, 2}};
RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3}, {1, 0}), 6,
expected_partitions);
}
TEST_F(ShapePartitionAssignerTest, Shape53WithLayout01) {
  std::vector<int64_t> expected_partitions[] = {{1}, {2}, {3}, {2, 2}};
RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3}, {0, 1}), 4,
expected_partitions);
}
TEST_F(ShapePartitionAssignerTest, Shape532WithLayout210) {
  std::vector<int64_t> expected_partitions[] = {
      {1},    {2},    {3},    {4},
      {5},    {3, 2}, {3, 2}, {4, 2},
      {3, 3}, {3, 3}, {3, 3}, {4, 3},
      {4, 3}, {4, 3}, {5, 3}, {4, 2, 2}};
RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3, 2}, {2, 1, 0}), 16,
expected_partitions);
}
TEST_F(ShapePartitionAssignerTest, Shape532WithLayout201) {
  std::vector<int64_t> expected_partitions[] = {
      {1},    {2},    {3},    {2, 2},
      {2, 2}, {3, 2}, {3, 2}, {3, 2},
      {3, 3}, {3, 3}, {3, 3}, {3, 4},
      {3, 4}, {3, 4}, {3, 5}, {3, 2, 2}};
RunR2Test(ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3, 2}, {2, 0, 1}), 16,
expected_partitions);
}
class ShapePartitionIteratorTest : public HloTestBase {
protected:
typedef std::vector<std::pair<int64_t, int64_t>> Partition;
};
TEST_F(ShapePartitionIteratorTest, Shape53WithLayout10) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3}, {1, 0});
{
ShapePartitionIterator iterator(shape, {1});
EXPECT_EQ(1, iterator.GetTotalPartitionCount());
EXPECT_TRUE(absl::c_equal(Partition({{0, 5}}), iterator.GetPartition(0)));
}
{
ShapePartitionIterator iterator(shape, {2});
EXPECT_EQ(2, iterator.GetTotalPartitionCount());
EXPECT_TRUE(absl::c_equal(Partition({{0, 2}}), iterator.GetPartition(0)));
EXPECT_TRUE(absl::c_equal(Partition({{2, 3}}), iterator.GetPartition(1)));
}
{
ShapePartitionIterator iterator(shape, {3});
EXPECT_EQ(3, iterator.GetTotalPartitionCount());
EXPECT_TRUE(absl::c_equal(Partition({{0, 1}}), iterator.GetPartition(0)));
EXPECT_TRUE(absl::c_equal(Partition({{1, 1}}), iterator.GetPartition(1)));
EXPECT_TRUE(absl::c_equal(Partition({{2, 3}}), iterator.GetPartition(2)));
}
}
TEST_F(ShapePartitionIteratorTest, Shape532WithLayout210) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3, 2}, {2, 1, 0});
{
ShapePartitionIterator iterator(shape, {1, 1});
EXPECT_EQ(1, iterator.GetTotalPartitionCount());
EXPECT_TRUE(
absl::c_equal(Partition({{0, 5}, {0, 3}}), iterator.GetPartition(0)));
}
{
ShapePartitionIterator iterator(shape, {2, 2});
EXPECT_EQ(4, iterator.GetTotalPartitionCount());
EXPECT_TRUE(
absl::c_equal(Partition({{0, 2}, {0, 1}}), iterator.GetPartition(0)));
EXPECT_TRUE(
absl::c_equal(Partition({{0, 2}, {1, 2}}), iterator.GetPartition(1)));
EXPECT_TRUE(
absl::c_equal(Partition({{2, 3}, {0, 1}}), iterator.GetPartition(2)));
EXPECT_TRUE(
absl::c_equal(Partition({{2, 3}, {1, 2}}), iterator.GetPartition(3)));
}
}
class RandomShapePartitionIteratorTest : public HloTestBase {
protected:
typedef std::vector<std::pair<int64_t, int64_t>> Partition;
RandomShapePartitionIteratorTest()
: generator_(rd_()), distribution_(1, 10) {}
std::vector<int64_t> RandR4Dims() { return {Rand(), Rand(), Rand(), Rand()}; }
int64_t Rand() { return distribution_(generator_); }
std::random_device rd_;
std::mt19937 generator_;
std::uniform_int_distribution<int> distribution_;
};
TEST_F(RandomShapePartitionIteratorTest, RandomShapeAndPartitions) {
Shape shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, RandR4Dims(), {3, 2, 1, 0});
const int num_outer_dims_to_partition = 1 + (Rand() % 3);
std::vector<int64_t> dim_sizes(num_outer_dims_to_partition);
std::vector<int64_t> dim_partition_counts(num_outer_dims_to_partition);
int64_t total_dim_size = 1;
for (int i = 0; i < num_outer_dims_to_partition; ++i) {
const int64_t dimension = shape.layout().minor_to_major(
shape.layout().minor_to_major_size() - 1 - i);
dim_sizes[i] = shape.dimensions(dimension);
total_dim_size *= dim_sizes[i];
const int64_t dim_partition_count = 1 + Rand() % dim_sizes[i];
dim_partition_counts[i] = dim_partition_count;
}
std::vector<std::map<int64_t, int64_t>> ranges(num_outer_dims_to_partition);
ShapePartitionIterator partition_iterator(shape, dim_partition_counts);
const int64_t partition_count = partition_iterator.GetTotalPartitionCount();
for (int64_t i = 0; i < partition_count; ++i) {
const auto& dim_partition = partition_iterator.GetPartition(i);
for (int dim = 0; dim < dim_partition.size(); ++dim) {
ranges[dim].insert(
std::make_pair(dim_partition[dim].first,
dim_partition[dim].first + dim_partition[dim].second));
}
}
for (int i = 0; i < ranges.size(); ++i) {
int64_t expected_index = 0;
for (auto& r : ranges[i]) {
EXPECT_EQ(expected_index, r.first);
expected_index = r.second;
}
EXPECT_EQ(expected_index, dim_sizes[i]);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/shape_partition.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/shape_partition_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
850b4321-06d4-460c-9d9b-7b489fe04b50 | cpp | google/tensorstore | container_to_shared | tensorstore/internal/container_to_shared.h | tensorstore/internal/container_to_shared_test.cc | #ifndef TENSORSTORE_INTERNAL_STRING_TO_SHARED_H_
#define TENSORSTORE_INTERNAL_STRING_TO_SHARED_H_
#include <stddef.h>
#include <memory>
#include <utility>
namespace tensorstore {
namespace internal {
template <typename Container>
inline std::shared_ptr<typename Container::value_type>
ContainerToSharedDataPointerWithOffset(Container&& container,
size_t offset = 0) {
auto ptr = std::make_shared<Container>(std::forward<Container>(container));
return std::shared_ptr<typename Container::value_type>(std::move(ptr),
ptr->data() + offset);
}
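// Example (a minimal sketch mirroring the unit tests): the returned aliasing
// shared_ptr shares ownership with the moved-in container, so the buffer
// stays alive for as long as any copy of the pointer exists.
//
//   std::string s = "hello";
//   auto ptr = ContainerToSharedDataPointerWithOffset(std::move(s), 2);
//   // *ptr == 'l'; ptr.get() - 2 points at the stored copy of "hello".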
}
}
#endif | #include "tensorstore/internal/container_to_shared.h"
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::ContainerToSharedDataPointerWithOffset;
TEST(ContainerToSharedDataPointerWithOffsetTest, SmallBuffer) {
std::string small = "hello";
auto ptr = ContainerToSharedDataPointerWithOffset(std::move(small), 2);
small = "aaaaa";
EXPECT_EQ("hello", std::string_view(ptr.get() - 2, 5));
}
TEST(ContainerToSharedDataPointerWithOffsetTest, LargeBuffer) {
std::string large(200, '\0');
for (int i = 0; i < 200; ++i) {
large[i] = i;
}
std::string large_copy = large;
auto* data = large.data();
auto ptr = ContainerToSharedDataPointerWithOffset(std::move(large), 5);
EXPECT_EQ(data + 5, ptr.get());
EXPECT_EQ(large_copy, std::string_view(ptr.get() - 5, 200));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container_to_shared.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container_to_shared_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
f6be64f7-9c91-427c-bf2e-79578886c590 | cpp | tensorflow/tensorflow | remove_compression_map | tensorflow/core/grappler/optimizers/data/remove_compression_map.cc | tensorflow/core/grappler/optimizers/data/remove_compression_map_test.cc | #include "tensorflow/core/grappler/optimizers/data/remove_compression_map.h"
#include <string>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace grappler {
namespace {
absl::StatusOr<std::string> GetCompressionFunctionName(const GraphDef& graph) {
for (const auto& function : graph.library().function()) {
for (const auto& node : function.node_def()) {
if (node.op() == "CompressElement") {
return function.signature().name();
}
}
}
return errors::Internal("Compression function not found.");
}
absl::StatusOr<NodeDef> GetCompressionMapNode(const GraphDef& graph) {
TF_ASSIGN_OR_RETURN(std::string compression_function_name,
GetCompressionFunctionName(graph));
for (const auto& node : graph.node()) {
if (node.op() != "ParallelMapDatasetV2") {
continue;
}
if (auto it = node.attr().find("f");
it != node.attr().end() && it->second.has_func() &&
it->second.func().name() == compression_function_name) {
return node;
}
}
return errors::Internal("Compression map node not found.");
}
}
Status RemoveCompressionMap::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
TF_ASSIGN_OR_RETURN(NodeDef compression_map_node,
GetCompressionMapNode(*output));
MutableGraphView graph(output);
for (const auto& compression_map_output :
graph.GetFanout(graph.GetOutputPort(compression_map_node.name(), 0))) {
compression_map_output.node->clear_input();
compression_map_output.node->add_input(compression_map_node.input().Get(0));
++stats->num_changes;
}
return absl::OkStatus();
}
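// Worked example (mirrors the Success unit test): given the pipeline
//   RangeDataset/_3 -> ParallelMapDatasetV2/_5 (f applies CompressElement)
// with "dataset" and "Sink" reading from the map node, the pass rewires
// every consumer to read directly from "RangeDataset/_3", bypassing the
// compression map.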
REGISTER_GRAPH_OPTIMIZER_AS(RemoveCompressionMap, "remove_compression_map");
}
} | #include "tensorflow/core/grappler/optimizers/data/remove_compression_map.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
using ::testing::HasSubstr;
TEST(RemoveCompressionMap, Success) {
using test::function::NDef;
GrapplerItem item;
  item.graph = test::function::GDef(
      {NDef("Const/_0", "Const", {}, {{"dtype", DT_INT64}, {"value", 0}}),
       NDef("Const/_1", "Const", {}, {{"dtype", DT_INT64}, {"value", 10}}),
       NDef("Const/_2", "Const", {}, {{"dtype", DT_INT64}, {"value", 1}}),
       NDef("RangeDataset/_3", "RangeDataset",
            {"Const/_0", "Const/_1", "Const/_2"}, {}),
       NDef("Const/_4", "Const", {}, {{"dtype", DT_INT64}, {"value", -1}}),
       graph_tests_utils::MakeParallelMapV2Node(
           "ParallelMapDatasetV2/_5", "RangeDataset/_3", "Const/_4",
           "__inference_Dataset_map_lambda_10", "default", false),
       NDef("dataset", "_Retval", {"ParallelMapDatasetV2/_5"},
            {{"T", DT_VARIANT}}),
       NDef("Sink", "Identity", {"ParallelMapDatasetV2/_5"},
            {{"T", DT_VARIANT}})},
      {FunctionDefHelper::Create(
          "__inference_Dataset_map_lambda_10", {"args_0: int64"},
          {"identity: variant"}, {},
          {
              {{"CompressElement"},
               "CompressElement",
               {"args_0"},
               {{"input_types", DT_INT64}}},
              {{"Identity"},
               "Identity",
               {"CompressElement:compressed:0"},
               {{"T", DT_VARIANT}}},
          },
          {})});
RemoveCompressionMap optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("dataset", output);
EXPECT_EQ(output.node(index).input(0), "RangeDataset/_3");
}
TEST(RemoveCompressionMap, FailureNoMap) {
using test::function::NDef;
GrapplerItem item;
  item.graph = test::function::GDef(
      {NDef("Const/_0", "Const", {}, {{"dtype", DT_INT64}, {"value", 0}}),
       NDef("Const/_1", "Const", {}, {{"dtype", DT_INT64}, {"value", 10}}),
       NDef("Const/_2", "Const", {}, {{"dtype", DT_INT64}, {"value", 1}}),
       NDef("RangeDataset/_3", "RangeDataset",
            {"Const/_0", "Const/_1", "Const/_2"}, {}),
       NDef("dataset", "_Retval", {"RangeDataset/_3"}, {{"T", DT_VARIANT}}),
       NDef("Sink", "Identity", {"RangeDataset/_3"}, {{"T", DT_VARIANT}})});
RemoveCompressionMap optimizer;
GraphDef output;
ASSERT_THAT(optimizer.Optimize(nullptr, item, &output),
testing::StatusIs(error::INTERNAL,
HasSubstr("Compression function not found.")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/remove_compression_map.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/remove_compression_map_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
54bb0901-967f-49c3-8af7-a3fac6672659 | cpp | tensorflow/tensorflow | sparsify_gather | tensorflow/tools/graph_transforms/sparsify_gather.cc | tensorflow/tools/graph_transforms/sparsify_gather_test.cc | #include <cmath>
#include <memory>
#include <unordered_map>
#include "tensorflow/c/checkpoint_reader.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
using str_util::Split;
using str_util::StringReplace;
using strings::StrCat;
namespace graph_transforms {
Status SparsifyWeights(const Tensor& tensor, Tensor* indices_tensor,
Tensor* values_tensor) {
if (tensor.dims() != 2 || tensor.dim_size(1) != 1) {
return tensorflow::errors::FailedPrecondition(
"Transform only applicable to subgraph with 'Const' with "
"tensor of shape [N, 1]. But instead get shape ",
tensor.shape().DebugString(), ".");
}
auto flat = tensor.flat<float>();
std::vector<int64_t> indices;
std::vector<float> values;
for (int64_t i = 0; i < flat.size(); i++) {
float val = flat(i);
if (std::abs(val) >= 1.0e-5) {
indices.push_back(i);
values.push_back(val);
}
}
if (indices.empty() || values.empty()) {
indices.push_back(0);
values.push_back(0);
}
*indices_tensor = Tensor(DataTypeToEnum<int64_t>::value,
{static_cast<int64_t>(indices.size())});
std::copy_n(indices.begin(), indices.size(),
indices_tensor->flat<int64_t>().data());
*values_tensor = Tensor(DataTypeToEnum<float>::value,
{static_cast<int64_t>(values.size())});
std::copy_n(values.begin(), values.size(),
values_tensor->flat<float>().data());
return OkStatus();
}
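// Worked example for SparsifyWeights() above (mirrors the unit test): for a
// [4, 1] tensor holding {0.2, 0.000001, 1.2, 0.001}, entry 1 falls below the
// 1.0e-5 magnitude threshold, so the outputs are indices = {0, 2, 3} and
// values = {0.2, 1.2, 0.001}.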
void CreateConstNode(const Tensor& tensor, const string& name,
NodeDef* node_def) {
node_def->set_op("Const");
node_def->set_name(name);
SetNodeTensorAttr<float>("value", tensor, node_def);
}
string GetMonolithicTensorKey(const string& tensor_slice_name) {
std::vector<string> names = Split(tensor_slice_name, "/");
if (absl::StartsWith(names[names.size() - 1], "part_")) {
CHECK_GE(names.size(), 2);
names.pop_back();
}
return absl::StrJoin(names, "/");
}
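// Example: "w/part_1" maps to the monolithic checkpoint key "w", while a
// name without a trailing "part_N" component is returned unchanged.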
Status ObtainTensorSlice(const GraphDef& input_graph_def,
const string& target_name,
string* shape_slice_string) {
string restore_node_name;
for (const auto& node : input_graph_def.node()) {
std::vector<string> node_name_parts = Split(node.name(), "/");
if (node_name_parts.size() == 2 &&
absl::StartsWith(node_name_parts[0], "save") &&
absl::StartsWith(node_name_parts[1], "Assign") &&
node.input(0) == target_name) {
restore_node_name = node.input(1);
break;
}
}
std::vector<string> restore_node_parts = Split(restore_node_name, ":");
CHECK_LE(restore_node_parts.size(), 2);
string tensor_names_node;
string shape_and_slices_node;
for (const auto& node : input_graph_def.node()) {
if ((node.name() == restore_node_parts[0]) && (node.op() == "RestoreV2")) {
tensor_names_node = node.input(1);
shape_and_slices_node = node.input(2);
break;
}
}
int offset = -1;
for (const auto& node : input_graph_def.node()) {
if (node.name() == tensor_names_node) {
Tensor tensor_names_tensor;
TF_RETURN_IF_ERROR(GetNodeAttr(node, "value", &tensor_names_tensor));
const auto& tensor_names_value = tensor_names_tensor.flat<tstring>();
for (int i = 0; i < tensor_names_value.size(); i++) {
if (tensor_names_value(i) == GetMonolithicTensorKey(target_name)) {
offset = i;
break;
}
}
}
}
if (offset == -1) {
return errors::Internal("Unable to find RestoreV2 entry for variable: ",
target_name);
}
for (const auto& node : input_graph_def.node()) {
if (node.name() == shape_and_slices_node) {
Tensor shape_and_slices_tensor;
TF_RETURN_IF_ERROR(GetNodeAttr(node, "value", &shape_and_slices_tensor));
const auto& shape_and_slices_value =
shape_and_slices_tensor.flat<tstring>();
*shape_slice_string = shape_and_slices_value(offset);
return OkStatus();
}
}
return errors::Internal("Unable to find slice for variable: ", target_name);
}
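// Worked example for ObtainTensorSlice() above (mirrors the unit test): for
// variable "w/part_1", the "save/Assign" node writing into it points at
// "save/RestoreV2"; the monolithic key "w" sits at offset 0 of
// save/RestoreV2/tensor_names, so the matching shape_and_slices entry
// "4 1 0,4:0,1" is returned.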
Status ReadTensorFromCheckpoint(
const string& tensor_name, const std::unique_ptr<BundleReader>& ckpt_reader,
const string& shape_and_slice, Tensor* tensor) {
if (ckpt_reader) {
TensorShape parsed_full_shape;
TensorSlice parsed_slice;
TensorShape parsed_slice_shape;
bool get_slice = false;
if (!shape_and_slice.empty()) {
TF_RETURN_IF_ERROR(
checkpoint::ParseShapeAndSlice(shape_and_slice, &parsed_full_shape,
&parsed_slice, &parsed_slice_shape));
get_slice = (parsed_full_shape != parsed_slice_shape);
}
if (get_slice) {
TF_RETURN_IF_ERROR(ckpt_reader->LookupSlice(
GetMonolithicTensorKey(tensor_name), parsed_slice, tensor));
} else {
TF_RETURN_IF_ERROR(
ckpt_reader->Lookup(GetMonolithicTensorKey(tensor_name), tensor));
}
return OkStatus();
}
return errors::Internal("Checkpoint reader was not initialized. ");
}
Status InitializeCheckpointReader(const TransformFuncContext& context,
std::unique_ptr<BundleReader>* ckpt_reader) {
if (context.params.count("input_checkpoint")) {
const string input_checkpoint = context.params.at("input_checkpoint")[0];
ckpt_reader->reset(new BundleReader(Env::Default(), input_checkpoint));
TF_RETURN_IF_ERROR((*ckpt_reader)->status());
}
return OkStatus();
}
Status ObtainVariableInfo(
const GraphDef& input_graph_def,
std::unique_ptr<std::unordered_map<string, string> >* shapes_and_slices) {
shapes_and_slices->reset(new std::unordered_map<string, string>());
for (const auto& node : input_graph_def.node()) {
if ((node.op() == "Variable") || (node.op() == "VariableV2")) {
string s;
TF_RETURN_IF_ERROR(ObtainTensorSlice(input_graph_def, node.name(), &s));
(**shapes_and_slices)[node.name()] = s;
}
}
return OkStatus();
}
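// The two helpers below delete one element from a repeated proto field by
// bubbling it to the end with SwapElements and then calling RemoveLast,
// which preserves the relative order of the surviving entries.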
Status RemoveInputAtIndex(NodeDef* n, int index) {
for (int i = index; i < n->input_size() - 1; i++) {
n->mutable_input()->SwapElements(i, i + 1);
}
n->mutable_input()->RemoveLast();
return OkStatus();
}
Status RemoveNodeAtIndex(GraphDef* g, int index) {
for (int i = index; i < g->node_size() - 1; i++) {
g->mutable_node()->SwapElements(i, i + 1);
}
g->mutable_node()->RemoveLast();
return OkStatus();
}
Status SparsifyGatherInternal(
const GraphDef& input_graph_def,
const std::unique_ptr<std::unordered_map<string, string> >&
shapes_and_slices,
const TransformFuncContext& context, const OpTypePattern& pattern,
const std::unique_ptr<BundleReader>& ckpt_reader,
GraphDef* output_graph_def) {
string group_init_node = "group_deps";
if (context.params.count("group_init_node")) {
group_init_node = context.params.at("group_init_node")[0];
}
GraphDef current_graph_def = input_graph_def;
bool any_match_found = false;
std::unordered_map<string, int> refs;
for (const auto& node : current_graph_def.node()) {
for (const auto& input : node.input()) {
auto parsed_input = StringReplace(input, "^", "", true);
refs[parsed_input] += 1;
}
}
do {
any_match_found = false;
GraphDef replaced_graph_def = current_graph_def;
std::vector<string> init_table_node_names;
std::vector<string> removed_node_names;
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
current_graph_def, pattern,
[&ckpt_reader, &any_match_found, &init_table_node_names,
&shapes_and_slices, &removed_node_names,
&refs](const NodeMatch& match, const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
any_match_found = true;
const NodeDef& gather_node = match.node;
if (gather_node.op() == "GatherV2") {
const NodeDef& axis_node = match.inputs[2].node;
Tensor axis_t;
TF_RETURN_IF_ERROR(GetNodeAttr(axis_node, "value", &axis_t));
int64_t axis = 0;
if (axis_t.dtype() == DT_INT32) {
axis = axis_t.scalar<int32>()();
} else if (axis_t.dtype() == DT_INT64) {
axis = axis_t.scalar<int64_t>()();
} else {
return tensorflow::errors::FailedPrecondition(
"Gather axis was not int32 or int64.");
}
if (axis != 0) {
return tensorflow::errors::FailedPrecondition(
"Transform only applicable to subgraph with GatherV2 over "
"axis 0. Found axis ",
axis, ".");
}
}
const NodeDef& weights_node = match.inputs[0].inputs[0].node;
DataType data_type;
TF_RETURN_IF_ERROR(GetNodeAttr(weights_node, "dtype", &data_type));
if (data_type != DT_FLOAT) {
return tensorflow::errors::FailedPrecondition(
"Transform only applicable to subgraph with 'Const',"
"'Variable', or 'VariableV2' of dtype "
"'DT_FLOAT'. Found '" +
weights_node.op() + "' with name '",
weights_node.name(), "' and dtype '", data_type, "'.");
}
Tensor weight;
if (weights_node.op() == "Const") {
weight = GetNodeTensorAttr(weights_node, "value");
} else {
TF_RETURN_IF_ERROR(ReadTensorFromCheckpoint(
weights_node.name(), ckpt_reader,
(*shapes_and_slices)[weights_node.name()], &weight));
}
removed_node_names.push_back(weights_node.name());
removed_node_names.push_back(match.inputs[0].node.name());
for (auto input_node : match.inputs[0].node.input()) {
auto parsed_input = StringReplace(input_node, "^", "", true);
refs[parsed_input]--;
}
Tensor indices_tensor;
Tensor values_tensor;
TF_RETURN_IF_ERROR(
SparsifyWeights(weight, &indices_tensor, &values_tensor));
DataType key_dtype = DT_INT64;
NodeDef indices_node;
CreateConstNode(indices_tensor,
StrCat(weights_node.name(), "/indices"),
&indices_node);
SetNodeAttr("dtype", key_dtype, &indices_node);
NodeDef values_node;
CreateConstNode(values_tensor, StrCat(weights_node.name(), "/values"),
&values_node);
SetNodeAttr("dtype", data_type, &values_node);
NodeDef hashtable_node;
hashtable_node.set_op("HashTable");
hashtable_node.set_name(StrCat(weights_node.name(), "/HashTable"));
SetNodeAttr("key_dtype", key_dtype, &hashtable_node);
SetNodeAttr("value_dtype", data_type, &hashtable_node);
NodeDef init_table_node;
init_table_node.set_op("InitializeTable");
init_table_node.set_name(
StrCat(weights_node.name(), "/InitializeTable"));
SetNodeAttr("Tkey", key_dtype, &init_table_node);
SetNodeAttr("Tval", data_type, &init_table_node);
init_table_node_names.push_back(init_table_node.name());
NodeDef lookup_node;
lookup_node.set_op("LookupTableFind");
lookup_node.set_name(StrCat(gather_node.name(), "/LookupTableFind"));
SetNodeAttr("Tin", key_dtype, &lookup_node);
SetNodeAttr("Tout", data_type, &lookup_node);
Tensor zero_tensor(data_type, TensorShape({}));
zero_tensor.flat<float>()(0) = 0.0;
NodeDef default_value_node;
CreateConstNode(zero_tensor, StrCat(gather_node.name(), "/Const"),
&default_value_node);
SetNodeAttr("dtype", data_type, &default_value_node);
Tensor dim_idx(DT_INT32, TensorShape({}));
dim_idx.flat<int32>()(0) = -1;
NodeDef dim_idx_node;
dim_idx_node.set_op("Const");
dim_idx_node.set_name(
StrCat(gather_node.name(), "/ExpandDims/Const"));
SetNodeAttr("value", dim_idx, &dim_idx_node);
SetNodeAttr("dtype", DT_INT32, &dim_idx_node);
NodeDef expand_dims_node;
expand_dims_node.set_op("ExpandDims");
expand_dims_node.set_name(gather_node.name());
SetNodeAttr("T", data_type, &expand_dims_node);
AddNodeInput(hashtable_node.name(), &init_table_node);
refs[hashtable_node.name()]++;
AddNodeInput(indices_node.name(), &init_table_node);
refs[indices_node.name()]++;
AddNodeInput(values_node.name(), &init_table_node);
refs[values_node.name()]++;
AddNodeInput(hashtable_node.name(), &lookup_node);
refs[hashtable_node.name()]++;
AddNodeInput(gather_node.input(1), &lookup_node);
refs[gather_node.input(1)]++;
AddNodeInput(default_value_node.name(), &lookup_node);
refs[default_value_node.name()]++;
AddNodeInput(lookup_node.name(), &expand_dims_node);
refs[lookup_node.name()]++;
AddNodeInput(dim_idx_node.name(), &expand_dims_node);
refs[dim_idx_node.name()]++;
new_nodes->push_back(match.inputs[1].node);
new_nodes->push_back(indices_node);
new_nodes->push_back(values_node);
new_nodes->push_back(hashtable_node);
new_nodes->push_back(init_table_node);
new_nodes->push_back(lookup_node);
new_nodes->push_back(default_value_node);
new_nodes->push_back(dim_idx_node);
new_nodes->push_back(expand_dims_node);
return OkStatus();
},
{true}, &replaced_graph_def));
NodeDef* init_op = nullptr;
for (int i = 0; i < replaced_graph_def.node_size(); i++) {
if (replaced_graph_def.node(i).name() == group_init_node &&
replaced_graph_def.node(i).op() == "NoOp") {
init_op = replaced_graph_def.mutable_node(i);
break;
}
}
if (!init_op) {
init_op = replaced_graph_def.mutable_node()->Add();
init_op->set_op("NoOp");
init_op->set_name(group_init_node);
}
for (const string& name : init_table_node_names) {
AddNodeInput(StrCat("^", name), init_op);
refs[name]++;
}
for (const auto& output : context.output_names) {
refs.erase(output);
}
for (const auto& input : context.input_names) {
refs.erase(input);
}
for (const auto& entry : refs) {
if (entry.second == 0) {
removed_node_names.push_back(entry.first);
}
}
while (!removed_node_names.empty()) {
auto name = removed_node_names.back();
removed_node_names.pop_back();
int i = 0;
while (i < replaced_graph_def.node_size()) {
if ((replaced_graph_def.node(i).name() == name) &&
(replaced_graph_def.node(i).op() != "RestoreV2")) {
for (const auto& input : replaced_graph_def.node(i).input()) {
auto parsed_input = StringReplace(input, "^", "", true);
refs[parsed_input] -= 1;
if (refs[parsed_input] == 0) {
removed_node_names.push_back(parsed_input);
}
}
TF_RETURN_IF_ERROR(RemoveNodeAtIndex(&replaced_graph_def, i));
continue;
}
int j = 0;
bool deleted_inputs = false;
while (j < replaced_graph_def.node(i).input_size()) {
if (replaced_graph_def.node(i).input(j) == name ||
replaced_graph_def.node(i).input(j) == ("^" + name)) {
TF_RETURN_IF_ERROR(
RemoveInputAtIndex(replaced_graph_def.mutable_node(i), j));
deleted_inputs = true;
continue;
}
j++;
}
if (deleted_inputs) {
if (replaced_graph_def.node(i).op() == "ConcatV2") {
if (replaced_graph_def.node(i).input_size() > 2) {
SetNodeAttr("N", replaced_graph_def.node(i).input_size() - 1,
replaced_graph_def.mutable_node(i));
} else if (replaced_graph_def.node(i).input_size() == 2) {
if (refs[replaced_graph_def.node(i).input(1)] != 1) {
return errors::Internal(
"Expect axis tensor of ConcatV2 node to only be referenced "
"once.");
}
refs[replaced_graph_def.node(i).input(1)] -= 1;
removed_node_names.push_back(replaced_graph_def.node(i).input(1));
replaced_graph_def.mutable_node(i)->mutable_input()->RemoveLast();
replaced_graph_def.mutable_node(i)->mutable_attr()->erase("N");
replaced_graph_def.mutable_node(i)->set_op("Identity");
} else {
return errors::Internal(
"ConcatV2 should have at least two elements");
}
}
if ((replaced_graph_def.node(i).op() == "Assign" ||
replaced_graph_def.node(i).op() == "Reshape" ||
replaced_graph_def.node(i).op() == "Equal" ||
replaced_graph_def.node(i).op() == "Mean" ||
replaced_graph_def.node(i).op() == "ScalarSummary") &&
replaced_graph_def.node(i).input_size() == 1) {
removed_node_names.push_back(replaced_graph_def.node(i).name());
}
if (!replaced_graph_def.node(i).input_size()) {
removed_node_names.push_back(replaced_graph_def.node(i).name());
}
}
i++;
}
}
current_graph_def = replaced_graph_def;
} while (any_match_found);
*output_graph_def = current_graph_def;
return OkStatus();
}
Status SparsifyGather(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
  // Pattern: Gather(Identity(weights), indices), where the weights are a
  // Const or a (possibly partitioned) variable.
  const OpTypePattern gather_pattern =
      {"Gather",
       {
         {"Identity",
          {
            {"Const|Variable|VariableV2"}  // weights
          }
         },
         {"*"},  // indices
       }
      };
  // Same subgraph shape, plus the explicit axis operand of GatherV2 (checked
  // to be 0 inside SparsifyGatherInternal).
  const OpTypePattern gather_v2_pattern =
      {"GatherV2",
       {
         {"Identity",
          {
            {"Const|Variable|VariableV2"}  // weights
          }
         },
         {"*"},      // indices
         {"Const"},  // axis
       }
      };
GraphDef cleaned_input_graph_def;
RemoveAttributes(input_graph_def, {"_output_shapes"},
&cleaned_input_graph_def);
GraphDef temp_output;
std::unique_ptr<BundleReader> ckpt_reader;
TF_RETURN_IF_ERROR(InitializeCheckpointReader(context, &ckpt_reader));
std::unique_ptr<std::unordered_map<string, string> > shapes_and_slices;
TF_RETURN_IF_ERROR(
ObtainVariableInfo(cleaned_input_graph_def, &shapes_and_slices));
TF_RETURN_IF_ERROR(SparsifyGatherInternal(
cleaned_input_graph_def, shapes_and_slices, context, gather_pattern,
ckpt_reader, &temp_output));
TF_RETURN_IF_ERROR(SparsifyGatherInternal(temp_output, shapes_and_slices,
context, gather_v2_pattern,
ckpt_reader, output_graph_def));
return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("sparsify_gather", SparsifyGather);
}
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status SparsifyGather(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
Status ReadTensorFromCheckpoint(
const string& tensor_name, const std::unique_ptr<BundleReader>& ckpt_reader,
const string& shape_and_slice, Tensor* tensor);
class SparsifyGatherTest : public ::testing::Test {
protected:
NodeDef* CreateNode(const StringPiece name, const StringPiece op,
const std::vector<NodeDef*>& inputs, GraphDef* graph_def,
bool control_dep = false) {
NodeDef* node_def = graph_def->add_node();
node_def->set_name(string(name));
node_def->set_op(string(op));
if (!control_dep) {
std::for_each(inputs.begin(), inputs.end(), [&node_def](NodeDef* input) {
node_def->add_input(input->name());
});
} else {
std::for_each(inputs.begin(), inputs.end(), [&node_def](NodeDef* input) {
node_def->add_input(strings::StrCat("^", input->name()));
});
}
return node_def;
}
void MakeGather(StringPiece name, bool gather_v2, NodeDef* params,
NodeDef* indices, GraphDef* graph_def) {
if (gather_v2) {
NodeDef* axis_node =
CreateNode(strings::StrCat(name, "_axis"), "Const", {}, graph_def);
Tensor axis_t(DT_INT32, TensorShape({}));
axis_t.scalar<int32>()() = 0;
SetNodeTensorAttr<int32>("value", axis_t, axis_node);
CreateNode(name, "GatherV2", {params, indices, axis_node}, graph_def);
} else {
CreateNode(name, "Gather", {params, indices}, graph_def);
}
}
void TestSinglePartition(bool gather_v2, bool include_shared_init,
bool test_variable, bool test_kept_concat,
const string& shared_init_name = "group_deps") {
GraphDef graph_def;
const auto checkpoint_path =
io::JoinPath(testing::TmpDir(), "checkpoint_single");
NodeDef* input_node = CreateNode("ids", "Const", {}, &graph_def);
NodeDef* w_node;
NodeDef* zeros_const;
NodeDef* zeros_shape;
NodeDef* zeros_node;
NodeDef* assign_node;
Tensor weights(DT_FLOAT, TensorShape({4, 1}));
test::FillValues<float>(&weights, {0.2, 0.000001, 1.2, 0.001});
if (!test_variable) {
w_node = CreateNode("w/part_1", "Const", {}, &graph_def);
SetNodeTensorAttr<float>("value", weights, w_node);
} else {
w_node = CreateNode("w/part_1", "VariableV2", {}, &graph_def);
zeros_shape = CreateNode("w/part_1/Initializer/zeros/shape_as_tensor",
"Const", {}, &graph_def);
zeros_const = CreateNode("w/part_1/Initializer/zeros/Const", "Const", {},
&graph_def);
zeros_node = CreateNode("w/part_1/Initializer/zeros", "Fill",
{zeros_shape, zeros_const}, &graph_def);
assign_node = CreateNode("w/part_1/Assign", "Assign",
{w_node, zeros_node}, &graph_def);
NodeDef* save_const_node =
CreateNode("save/Const", "Const", {}, &graph_def);
Tensor tensor_names_values(DT_STRING, TensorShape({1}));
test::FillValues<tstring>(&tensor_names_values, {"w"});
NodeDef* tensor_names_node =
CreateNode("save/RestoreV2/tensor_names", "Const", {}, &graph_def);
SetNodeTensorAttr<string>("value", tensor_names_values,
tensor_names_node);
NodeDef* tensor_shapes_slices_node = CreateNode(
"save/RestoreV2/shape_and_slices", "Const", {}, &graph_def);
Tensor shapes_slices_val(DT_STRING, TensorShape({1}));
shapes_slices_val.flat<tstring>()(0) = "4 1 0,4:0,1";
SetNodeTensorAttr<string>("value", shapes_slices_val,
tensor_shapes_slices_node);
NodeDef* restore_node = CreateNode(
"save/RestoreV2", "RestoreV2",
{save_const_node, tensor_names_node, tensor_shapes_slices_node},
&graph_def);
CreateNode("save/Assign", "Assign", {w_node, restore_node}, &graph_def);
BundleWriter writer(Env::Default(), checkpoint_path);
TF_ASSERT_OK(writer.Add("w", weights));
TF_ASSERT_OK(writer.Finish());
}
SetNodeAttr("dtype", DT_FLOAT, w_node);
NodeDef* identity_node =
CreateNode("w/read", "Identity", {w_node}, &graph_def);
MakeGather("gather", gather_v2, identity_node, input_node, &graph_def);
if (include_shared_init) {
if (!test_variable) {
CreateNode(shared_init_name, "NoOp", {}, &graph_def);
} else {
CreateNode(shared_init_name, "NoOp", {assign_node}, &graph_def, true);
}
}
NodeDef* concat_axis_node =
CreateNode("linear/concat/axis", "Const", {}, &graph_def);
NodeDef* concat_input_node =
CreateNode("concat/input/node", "Const", {}, &graph_def);
NodeDef* concat_node = nullptr;
if (!test_kept_concat) {
concat_node = CreateNode(
"concat/node", "ConcatV2",
{identity_node, concat_input_node, concat_axis_node}, &graph_def);
SetNodeAttr("N", 2, concat_node);
} else {
NodeDef* concat_input_node_2 =
CreateNode("concat/input/node_2", "Const", {}, &graph_def);
concat_node = CreateNode("concat/node", "ConcatV2",
{identity_node, concat_input_node,
concat_input_node_2, concat_axis_node},
&graph_def);
SetNodeAttr("N", 3, concat_node);
}
GraphDef result;
TransformFuncContext context;
context.input_names = {"ids"};
context.output_names = {"gather"};
if (test_variable) {
context.params["input_checkpoint"] = {checkpoint_path};
}
if (shared_init_name != "group_deps") {
context.params["group_init_node"] = {shared_init_name};
}
TF_ASSERT_OK(SparsifyGather(graph_def, context, &result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(result, &node_lookup);
EXPECT_EQ(0,
node_lookup.count("w/part_1/Initializer/zeros/shape_as_tensor"));
EXPECT_EQ(0, node_lookup.count("w/part_1/Initializer/zeros/Const"));
EXPECT_EQ(0, node_lookup.count("w/part_1/Initializer/zeros"));
EXPECT_EQ(0, node_lookup.count("w/part_1/Assign"));
EXPECT_EQ(1, node_lookup.count("ids"));
EXPECT_EQ("Const", node_lookup.at("ids")->op());
EXPECT_EQ(1, node_lookup.count("concat/node"));
if (!test_kept_concat) {
EXPECT_EQ(0, node_lookup.count("linear/concat/axis"));
EXPECT_EQ("Identity", node_lookup.at("concat/node")->op());
EXPECT_EQ(1, node_lookup.at("concat/node")->input_size());
EXPECT_EQ("concat/input/node", node_lookup.at("concat/node")->input(0));
} else {
EXPECT_EQ(1, node_lookup.count("linear/concat/axis"));
EXPECT_EQ("ConcatV2", node_lookup.at("concat/node")->op());
EXPECT_EQ(3, node_lookup.at("concat/node")->input_size());
EXPECT_EQ("concat/input/node", node_lookup.at("concat/node")->input(0));
EXPECT_EQ("concat/input/node_2", node_lookup.at("concat/node")->input(1));
EXPECT_EQ("linear/concat/axis", node_lookup.at("concat/node")->input(2));
EXPECT_EQ(2, node_lookup.at("concat/node")->attr().at("N").i());
}
EXPECT_EQ(1, node_lookup.count("w/part_1/indices"));
EXPECT_EQ("Const", node_lookup.at("w/part_1/indices")->op());
Tensor expected_indices_tensor(DT_INT64, TensorShape({3}));
test::FillValues<int64_t>(&expected_indices_tensor, {0, 2, 3});
test::ExpectTensorEqual<int64_t>(
expected_indices_tensor,
GetNodeTensorAttr(*(node_lookup.at("w/part_1/indices")), "value"));
EXPECT_EQ(1, node_lookup.count("w/part_1/values"));
EXPECT_EQ("Const", node_lookup.at("w/part_1/values")->op());
Tensor expected_values_tensor(DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected_values_tensor, {0.2, 1.2, 0.001});
test::ExpectTensorNear<float>(
expected_values_tensor,
GetNodeTensorAttr(*(node_lookup.at("w/part_1/values")), "value"), 1e-5);
EXPECT_EQ(1, node_lookup.count("w/part_1/HashTable"));
EXPECT_EQ("HashTable", node_lookup.at("w/part_1/HashTable")->op());
EXPECT_EQ(1, node_lookup.count("w/part_1/InitializeTable"));
EXPECT_EQ("InitializeTable",
node_lookup.at("w/part_1/InitializeTable")->op());
EXPECT_EQ(1, node_lookup.count("gather/LookupTableFind"));
EXPECT_EQ("LookupTableFind",
node_lookup.at("gather/LookupTableFind")->op());
EXPECT_EQ(1, node_lookup.count("gather/Const"));
EXPECT_EQ("Const", node_lookup.at("gather/Const")->op());
Tensor expected_gather_default_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&expected_gather_default_tensor, {0.0});
test::ExpectTensorNear<float>(
expected_gather_default_tensor,
GetNodeTensorAttr(*(node_lookup.at("gather/Const")), "value"), 1e-5);
EXPECT_EQ(1, node_lookup.count("gather/ExpandDims/Const"));
EXPECT_EQ("Const", node_lookup.at("gather/ExpandDims/Const")->op());
Tensor expected_expand_dims_tensor(DT_INT32, TensorShape({}));
test::FillValues<int32>(&expected_expand_dims_tensor, {-1});
test::ExpectTensorEqual<int32>(
expected_expand_dims_tensor,
GetNodeTensorAttr(*(node_lookup.at("gather/ExpandDims/Const")),
"value"));
EXPECT_EQ(1, node_lookup.count("gather"));
EXPECT_EQ("ExpandDims", node_lookup.at("gather")->op());
EXPECT_EQ(1, node_lookup.count(shared_init_name));
EXPECT_EQ("NoOp", node_lookup.at(shared_init_name)->op());
EXPECT_EQ("w/part_1/HashTable",
node_lookup.at("w/part_1/InitializeTable")->input(0));
EXPECT_EQ("w/part_1/indices",
node_lookup.at("w/part_1/InitializeTable")->input(1));
EXPECT_EQ("w/part_1/values",
node_lookup.at("w/part_1/InitializeTable")->input(2));
EXPECT_EQ("w/part_1/HashTable",
node_lookup.at("gather/LookupTableFind")->input(0));
EXPECT_EQ("ids", node_lookup.at("gather/LookupTableFind")->input(1));
EXPECT_EQ("gather/Const",
node_lookup.at("gather/LookupTableFind")->input(2));
EXPECT_EQ("gather/LookupTableFind", node_lookup.at("gather")->input(0));
EXPECT_NE(std::find(node_lookup.at(shared_init_name)->input().begin(),
node_lookup.at(shared_init_name)->input().end(),
"^w/part_1/InitializeTable"),
node_lookup.at(shared_init_name)->input().end());
EXPECT_EQ(1, node_lookup.at(shared_init_name)->input().size());
}
void TestMultiPartition(bool gather_v2, bool include_shared_init,
bool test_variable,
const string& shared_init_name = "group_deps") {
GraphDef graph_def;
const auto checkpoint_path =
io::JoinPath(testing::TmpDir(), "checkpoint_multiple");
NodeDef* input_node = CreateNode("ids", "Const", {}, &graph_def);
NodeDef* w_node1;
NodeDef* w_node2;
NodeDef* zeros_const1;
NodeDef* zeros_shape1;
NodeDef* zeros_node1;
NodeDef* zeros_const2;
NodeDef* zeros_shape2;
NodeDef* zeros_node2;
NodeDef* assign_node1;
NodeDef* assign_node2;
Tensor weights(DT_FLOAT, TensorShape({4, 1}));
test::FillValues<float>(&weights, {0.2, 0.000001, 1.2, 0.001});
if (!test_variable) {
w_node1 = CreateNode("w1/part_1", "Const", {}, &graph_def);
w_node2 = CreateNode("w2/part_1", "Const", {}, &graph_def);
SetNodeTensorAttr<float>("value", weights, w_node1);
SetNodeTensorAttr<float>("value", weights, w_node2);
} else {
NodeDef* save_const_node =
CreateNode("save/Const", "Const", {}, &graph_def);
NodeDef* tensor_names_node =
CreateNode("save/RestoreV2/tensor_names", "Const", {}, &graph_def);
Tensor tensor_names_values(DT_STRING, TensorShape({2}));
test::FillValues<tstring>(&tensor_names_values, {"w1", "w2"});
SetNodeTensorAttr<string>("value", tensor_names_values,
tensor_names_node);
NodeDef* tensor_shapes_slices_node = CreateNode(
"save/RestoreV2/shape_and_slices", "Const", {}, &graph_def);
Tensor shapes_slices_val(DT_STRING, TensorShape({2}));
shapes_slices_val.flat<tstring>()(0) = "4 1 0,4:0,1";
shapes_slices_val.flat<tstring>()(1) = "4 1 0,4:0,1";
SetNodeTensorAttr<string>("value", shapes_slices_val,
tensor_shapes_slices_node);
NodeDef* restore_node = CreateNode(
"save/RestoreV2", "RestoreV2",
{save_const_node, tensor_names_node, tensor_shapes_slices_node},
&graph_def);
w_node1 = CreateNode("w1/part_1", "VariableV2", {}, &graph_def);
zeros_shape1 = CreateNode("w1/part_1/Initializer/zeros/shape_as_tensor",
"Const", {}, &graph_def);
zeros_const1 = CreateNode("w1/part_1/Initializer/zeros/Const", "Const",
{}, &graph_def);
zeros_node1 = CreateNode("w1/part_1/Initializer/zeros", "Fill",
{zeros_shape1, zeros_const1}, &graph_def);
assign_node1 = CreateNode("w1/part_1/Assign", "Assign",
{w_node1, zeros_node1}, &graph_def);
CreateNode("save/Assign", "Assign", {w_node1, restore_node}, &graph_def);
w_node2 = CreateNode("w2/part_1", "VariableV2", {}, &graph_def);
zeros_shape2 = CreateNode("w2/part_1/Initializer/zeros/shape_as_tensor",
"Const", {}, &graph_def);
zeros_const2 = CreateNode("w2/part_1/Initializer/zeros/Const", "Const",
{}, &graph_def);
zeros_node2 = CreateNode("w2/part_1/Initializer/zeros", "Fill",
{zeros_shape2, zeros_const2}, &graph_def);
assign_node2 = CreateNode("w2/part_1/Assign", "Assign",
{w_node2, zeros_node2}, &graph_def);
CreateNode("save/Assign_1", "Assign", {w_node2, restore_node},
&graph_def);
BundleWriter writer(Env::Default(), checkpoint_path);
TF_ASSERT_OK(writer.Add("w1", weights));
TF_ASSERT_OK(writer.Add("w2", weights));
TF_ASSERT_OK(writer.Finish());
}
SetNodeAttr("dtype", DT_FLOAT, w_node1);
SetNodeAttr("dtype", DT_FLOAT, w_node2);
NodeDef* identity_node1 =
CreateNode("w1/part_1/read", "Identity", {w_node1}, &graph_def);
NodeDef* identity_node2 =
CreateNode("w2/part_1/read", "Identity", {w_node2}, &graph_def);
MakeGather("gather1", gather_v2, identity_node1, input_node, &graph_def);
MakeGather("gather2", gather_v2, identity_node2, input_node, &graph_def);
NodeDef* concat_axis_node =
CreateNode("linear/concat/axis", "Const", {}, &graph_def);
NodeDef* concat_node = CreateNode(
"concat/node", "ConcatV2",
{identity_node1, identity_node2, concat_axis_node}, &graph_def);
SetNodeAttr("N", 2, concat_node);
if (include_shared_init) {
if (!test_variable) {
CreateNode(shared_init_name, "NoOp", {}, &graph_def);
} else {
CreateNode(shared_init_name, "NoOp", {assign_node1, assign_node2},
&graph_def, true);
}
}
GraphDef result;
TransformFuncContext context;
context.input_names = {"ids"};
context.output_names = {"gather1", "gather2"};
if (test_variable) {
context.params["input_checkpoint"] = {checkpoint_path};
}
if (shared_init_name != "group_deps") {
context.params["group_init_node"] = {shared_init_name};
}
TF_ASSERT_OK(SparsifyGather(graph_def, context, &result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(result, &node_lookup);
EXPECT_EQ(0,
node_lookup.count("w1/part_1/Initializer/zeros/shape_as_tensor"));
EXPECT_EQ(0, node_lookup.count("w1/part_1/Initializer/zeros/Const"));
EXPECT_EQ(0, node_lookup.count("w1/part_1/Initializer/zeros"));
EXPECT_EQ(0, node_lookup.count("w1/part_1/Assign"));
EXPECT_EQ(0,
node_lookup.count("w2/part_1/Initializer/zeros/shape_as_tensor"));
EXPECT_EQ(0, node_lookup.count("w2/part_1/Initializer/zeros/Const"));
EXPECT_EQ(0, node_lookup.count("w2/part_1/Initializer/zeros"));
EXPECT_EQ(0, node_lookup.count("w2/part_1/Assign"));
EXPECT_EQ(1, node_lookup.count("ids"));
EXPECT_EQ("Const", node_lookup.at("ids")->op());
EXPECT_EQ(1, node_lookup.count(shared_init_name));
EXPECT_EQ("NoOp", node_lookup.at(shared_init_name)->op());
EXPECT_EQ(1, node_lookup.count("w1/part_1/indices"));
EXPECT_EQ("Const", node_lookup.at("w1/part_1/indices")->op());
Tensor expected_indices_tensor1(DT_INT64, TensorShape({3}));
test::FillValues<int64_t>(&expected_indices_tensor1, {0, 2, 3});
test::ExpectTensorEqual<int64_t>(
expected_indices_tensor1,
GetNodeTensorAttr(*(node_lookup.at("w1/part_1/indices")), "value"));
EXPECT_EQ(1, node_lookup.count("w1/part_1/values"));
EXPECT_EQ("Const", node_lookup.at("w1/part_1/values")->op());
Tensor expected_values_tensor1(DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected_values_tensor1, {0.2, 1.2, 0.001});
test::ExpectTensorNear<float>(
expected_values_tensor1,
GetNodeTensorAttr(*(node_lookup.at("w1/part_1/values")), "value"),
1e-5);
EXPECT_EQ(1, node_lookup.count("w1/part_1/HashTable"));
EXPECT_EQ("HashTable", node_lookup.at("w1/part_1/HashTable")->op());
EXPECT_EQ(1, node_lookup.count("w1/part_1/InitializeTable"));
EXPECT_EQ("InitializeTable",
node_lookup.at("w1/part_1/InitializeTable")->op());
EXPECT_EQ(1, node_lookup.count("gather1/LookupTableFind"));
EXPECT_EQ("LookupTableFind",
node_lookup.at("gather1/LookupTableFind")->op());
EXPECT_EQ(1, node_lookup.count("gather1/Const"));
EXPECT_EQ("Const", node_lookup.at("gather1/Const")->op());
Tensor expected_gather_default_tensor1(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&expected_gather_default_tensor1, {0.0});
test::ExpectTensorNear<float>(
expected_gather_default_tensor1,
GetNodeTensorAttr(*(node_lookup.at("gather1/Const")), "value"), 1e-5);
EXPECT_EQ(1, node_lookup.count("gather1/ExpandDims/Const"));
EXPECT_EQ("Const", node_lookup.at("gather1/ExpandDims/Const")->op());
Tensor expected_expand_dims_tensor1(DT_INT32, TensorShape({}));
test::FillValues<int32>(&expected_expand_dims_tensor1, {-1});
test::ExpectTensorEqual<int32>(
expected_expand_dims_tensor1,
GetNodeTensorAttr(*(node_lookup.at("gather1/ExpandDims/Const")),
"value"));
EXPECT_EQ(1, node_lookup.count("gather1"));
EXPECT_EQ("ExpandDims", node_lookup.at("gather1")->op());
EXPECT_EQ(1, node_lookup.count("w2/part_1/indices"));
EXPECT_EQ("Const", node_lookup.at("w2/part_1/indices")->op());
Tensor expected_indices_tensor2(DT_INT64, TensorShape({3}));
test::FillValues<int64_t>(&expected_indices_tensor2, {0, 2, 3});
test::ExpectTensorEqual<int64_t>(
expected_indices_tensor2,
GetNodeTensorAttr(*(node_lookup.at("w2/part_1/indices")), "value"));
EXPECT_EQ(1, node_lookup.count("w2/part_1/values"));
EXPECT_EQ("Const", node_lookup.at("w2/part_1/values")->op());
Tensor expected_values_tensor2(DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected_values_tensor2, {0.2, 1.2, 0.001});
test::ExpectTensorNear<float>(
expected_values_tensor2,
GetNodeTensorAttr(*(node_lookup.at("w2/part_1/values")), "value"),
1e-5);
EXPECT_EQ(1, node_lookup.count("w2/part_1/HashTable"));
EXPECT_EQ("HashTable", node_lookup.at("w2/part_1/HashTable")->op());
EXPECT_EQ(1, node_lookup.count("w2/part_1/InitializeTable"));
EXPECT_EQ("InitializeTable",
node_lookup.at("w2/part_1/InitializeTable")->op());
EXPECT_EQ(1, node_lookup.count("gather2/LookupTableFind"));
EXPECT_EQ("LookupTableFind",
node_lookup.at("gather2/LookupTableFind")->op());
EXPECT_EQ(1, node_lookup.count("gather2/Const"));
EXPECT_EQ("Const", node_lookup.at("gather2/Const")->op());
Tensor expected_gather_default_tensor2(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&expected_gather_default_tensor2, {0.0});
test::ExpectTensorNear<float>(
expected_gather_default_tensor2,
GetNodeTensorAttr(*(node_lookup.at("gather2/Const")), "value"), 1e-5);
EXPECT_EQ(1, node_lookup.count("gather2/ExpandDims/Const"));
EXPECT_EQ("Const", node_lookup.at("gather2/ExpandDims/Const")->op());
Tensor expected_expand_dims_tensor2(DT_INT32, TensorShape({}));
test::FillValues<int32>(&expected_expand_dims_tensor2, {-1});
test::ExpectTensorEqual<int32>(
expected_expand_dims_tensor2,
GetNodeTensorAttr(*(node_lookup.at("gather2/ExpandDims/Const")),
"value"));
EXPECT_EQ(1, node_lookup.count("gather2"));
EXPECT_EQ("ExpandDims", node_lookup.at("gather2")->op());
EXPECT_EQ("w1/part_1/HashTable",
node_lookup.at("w1/part_1/InitializeTable")->input(0));
EXPECT_EQ("w1/part_1/indices",
node_lookup.at("w1/part_1/InitializeTable")->input(1));
EXPECT_EQ("w1/part_1/values",
node_lookup.at("w1/part_1/InitializeTable")->input(2));
EXPECT_EQ("w2/part_1/HashTable",
node_lookup.at("w2/part_1/InitializeTable")->input(0));
EXPECT_EQ("w2/part_1/indices",
node_lookup.at("w2/part_1/InitializeTable")->input(1));
EXPECT_EQ("w2/part_1/values",
node_lookup.at("w2/part_1/InitializeTable")->input(2));
EXPECT_EQ("w1/part_1/HashTable",
node_lookup.at("gather1/LookupTableFind")->input(0));
EXPECT_EQ("ids", node_lookup.at("gather1/LookupTableFind")->input(1));
EXPECT_EQ("gather1/Const",
node_lookup.at("gather1/LookupTableFind")->input(2));
EXPECT_EQ("gather1/LookupTableFind", node_lookup.at("gather1")->input(0));
EXPECT_EQ("w2/part_1/HashTable",
node_lookup.at("gather2/LookupTableFind")->input(0));
EXPECT_EQ("ids", node_lookup.at("gather2/LookupTableFind")->input(1));
EXPECT_EQ("gather2/Const",
node_lookup.at("gather2/LookupTableFind")->input(2));
EXPECT_EQ("gather2/LookupTableFind", node_lookup.at("gather2")->input(0));
EXPECT_EQ(0, node_lookup.count("linear/concat/axis"));
EXPECT_EQ(0, node_lookup.count("concat/node"));
EXPECT_EQ(2, node_lookup.at(shared_init_name)->input_size());
EXPECT_NE(std::find(node_lookup.at(shared_init_name)->input().begin(),
node_lookup.at(shared_init_name)->input().end(),
"^w1/part_1/InitializeTable"),
node_lookup.at(shared_init_name)->input().end());
EXPECT_NE(std::find(node_lookup.at(shared_init_name)->input().begin(),
node_lookup.at(shared_init_name)->input().end(),
"^w2/part_1/InitializeTable"),
node_lookup.at(shared_init_name)->input().end());
}
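  // Writes one slice of a larger tensor to a checkpoint and checks that
  // ReadTensorFromCheckpoint reproduces it when given the matching
  // shape-and-slices spec.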
void TestReadTensorSlice() {
const auto checkpoint_path =
io::JoinPath(testing::TmpDir(), "checkpoint_slice");
Tensor weights(DT_FLOAT, TensorShape({2, 1}));
test::FillValues<float>(&weights, {0.2, 0.000001});
BundleWriter writer(Env::Default(), checkpoint_path);
TF_ASSERT_OK(writer.AddSlice("w", TensorShape({4, 1}),
TensorSlice::ParseOrDie("0,2:0,1"), weights));
TF_ASSERT_OK(writer.Finish());
std::unique_ptr<BundleReader> reader(
new BundleReader(Env::Default(), checkpoint_path));
Tensor results;
TF_ASSERT_OK(
ReadTensorFromCheckpoint("w/part_0", reader, "4 1 0,2:0,1", &results));
test::ExpectTensorEqual<float>(weights, results);
}
};
TEST_F(SparsifyGatherTest, TestSinglePartition) {
TestSinglePartition(false, false, false, false);
TestSinglePartition(false, true, false, false);
TestSinglePartition(true, false, false, false);
TestSinglePartition(true, true, false, false);
TestSinglePartition(false, false, true, false);
TestSinglePartition(false, true, true, false);
TestSinglePartition(true, false, true, false);
TestSinglePartition(true, true, true, false);
TestSinglePartition(false, true, false, false, "shared_inits");
TestSinglePartition(true, true, false, false, "shared_inits");
TestSinglePartition(false, true, true, false, "shared_inits");
TestSinglePartition(true, true, true, false, "shared_inits");
TestSinglePartition(false, false, false, true);
TestSinglePartition(false, true, false, true);
TestSinglePartition(true, false, false, true);
TestSinglePartition(true, true, false, true);
TestSinglePartition(false, false, true, true);
TestSinglePartition(false, true, true, true);
TestSinglePartition(true, false, true, true);
TestSinglePartition(true, true, true, true);
TestSinglePartition(false, true, false, true, "shared_inits");
TestSinglePartition(true, true, false, true, "shared_inits");
TestSinglePartition(false, true, true, true, "shared_inits");
TestSinglePartition(true, true, true, true, "shared_inits");
}
TEST_F(SparsifyGatherTest, TestMultiPartition) {
TestMultiPartition(false, false, false);
TestMultiPartition(false, true, false);
TestMultiPartition(true, false, false);
TestMultiPartition(true, true, false);
TestMultiPartition(false, false, true);
TestMultiPartition(false, true, true);
TestMultiPartition(true, false, true);
TestMultiPartition(true, true, true);
TestMultiPartition(false, true, false, "shared_inits");
TestMultiPartition(true, true, false, "shared_inits");
TestMultiPartition(false, true, true, "shared_inits");
TestMultiPartition(true, true, true, "shared_inits");
}
TEST_F(SparsifyGatherTest, TestTensorSlice) { TestReadTensorSlice(); }
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/sparsify_gather.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/sparsify_gather_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
837280c5-10b4-4acb-af2b-bf67bd73c365 | cpp | tensorflow/tensorflow | round_weights | tensorflow/tools/graph_transforms/round_weights.cc | tensorflow/tools/graph_transforms/round_weights_test.cc | #define EIGEN_USE_THREADS
#include "tensorflow/core/common_runtime/constant_folding.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/threadpool_device.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
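// Rounds the values of large float Const nodes to the centers of `num_steps`
// equal-width buckets spanning each tensor's value range. The graph still
// computes in float, but the weights take far fewer distinct values, which
// makes the serialized graph compress much better.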
Status RoundWeights(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
int32_t num_steps;
TF_RETURN_IF_ERROR(
context.GetOneInt32Parameter("num_steps", 256, &num_steps));
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
input_graph_def, {"Const"},
[num_steps](const NodeMatch& match, const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
const NodeDef& old_const_node = match.node;
if (!old_const_node.attr().count("dtype")) {
return errors::InvalidArgument("No 'dtype' attribute for Const node ",
old_const_node.name());
}
if (!old_const_node.attr().count("value")) {
return errors::InvalidArgument("No 'value' attribute for Const node ",
old_const_node.name());
}
const DataType old_dtype = old_const_node.attr().at("dtype").type();
Tensor old_tensor;
if (!old_tensor.FromProto(old_const_node.attr().at("value").tensor())) {
return errors::InvalidArgument("Decoding Tensor failed for node",
old_const_node.name());
}
const size_t num_elements = old_tensor.NumElements();
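      // Only float tensors with a meaningful number of elements are worth
      // rounding; pass everything else through unchanged.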
if ((old_dtype != DT_FLOAT) || (num_elements < 16)) {
new_nodes->push_back(old_const_node);
return OkStatus();
}
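      // Scan the weights once to find the range the buckets must cover.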
const float* old_values = old_tensor.flat<float>().data();
      float min = std::numeric_limits<float>::max();
      // lowest() (the most negative float), not min() (the smallest positive
      // float), so the range scan is also correct when every weight is
      // negative.
      float max = std::numeric_limits<float>::lowest();
for (int i = 0; i < num_elements; ++i) {
const float value = old_values[i];
min = std::min(min, value);
max = std::max(max, value);
}
if (min == max) {
if (std::abs(min) < 0.000001f) {
max = min + 1.0f;
} else if (min > 0) {
max = 2.0f * min;
} else {
min = 2.0f * max;
}
}
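      // Snap every value to the center of its bucket; using bucket centers
      // rather than edges keeps the rounding error within half a bucket width.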
Tensor rounded_tensor(DT_FLOAT, old_tensor.shape());
float* rounded_values = rounded_tensor.flat<float>().data();
const float bucket_width = (max - min) / num_steps;
for (int i = 0; i < num_elements; ++i) {
const int32_t bucket =
std::floor((old_values[i] - min) / bucket_width);
rounded_values[i] = min + (bucket_width * (bucket + 0.5f));
}
NodeDef rounded_const_node;
rounded_const_node.set_op("Const");
rounded_const_node.set_name(old_const_node.name());
SetNodeAttr("dtype", DT_FLOAT, &rounded_const_node);
SetNodeTensorAttr<float>("value", rounded_tensor, &rounded_const_node);
new_nodes->push_back(rounded_const_node);
return OkStatus();
},
{}, output_graph_def));
return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("round_weights", RoundWeights);
}
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status RoundWeights(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
class RoundWeightsTest : public ::testing::Test {
protected:
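  // Builds a small Conv2D graph, applies RoundWeights, and checks that the
  // rounded graph's output stays within 0.5 of the original while the
  // weights node remains a float Const.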
void TestRoundWeights() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_data(DT_FLOAT, TensorShape({1, 1, 6, 2}));
test::FillValues<float>(
&input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,
-5.0f, -3.0f, -6.0f});
Output input_op =
Const(root.WithOpName("input_op"), Input::Initializer(input_data));
Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 10}));
test::FillValues<float>(
&weights_data,
{1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f, 1.0f, 2.0f,
3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f, 1.0f, 2.0f, 3.0f, 4.0f,
0.1f, 0.2f, 0.3f, 0.4f, 1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f,
0.3f, 0.4f, 1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f});
Output weights_op =
Const(root.WithOpName("weights_op"), Input::Initializer(weights_data));
Output conv_op = Conv2D(root.WithOpName("output"), input_op, weights_op,
{1, 1, 1, 1}, "VALID");
GraphDef original_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&original_graph_def));
std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
TF_ASSERT_OK(original_session->Create(original_graph_def));
std::vector<Tensor> original_outputs;
TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs));
GraphDef rounded_graph_def;
TF_ASSERT_OK(
RoundWeights(original_graph_def, {{}, {"output"}}, &rounded_graph_def));
std::unique_ptr<Session> rounded_session(NewSession(SessionOptions()));
TF_ASSERT_OK(rounded_session->Create(rounded_graph_def));
std::vector<Tensor> rounded_outputs;
TF_ASSERT_OK(rounded_session->Run({}, {"output"}, {}, &rounded_outputs));
test::ExpectTensorNear<float>(original_outputs[0], rounded_outputs[0], 0.5);
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(rounded_graph_def, &node_lookup);
EXPECT_EQ(1, node_lookup.count("input_op"));
const NodeDef* r_input_op = node_lookup.at("input_op");
EXPECT_EQ(DT_FLOAT, r_input_op->attr().at("dtype").type());
EXPECT_EQ(1, node_lookup.count("weights_op"));
const NodeDef* r_weights_op = node_lookup.at("weights_op");
EXPECT_EQ("Const", r_weights_op->op());
EXPECT_EQ(DT_FLOAT, r_weights_op->attr().at("dtype").type());
}
};
TEST_F(RoundWeightsTest, TestRoundWeights) { TestRoundWeights(); }
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/round_weights.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/round_weights_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ecaeac1a-3764-4ad2-9601-bb802ba6ec39 | cpp | tensorflow/tensorflow | reduce_utils | tensorflow/lite/kernels/internal/optimized/reduce_utils.h | tensorflow/lite/kernels/internal/optimized/reduce_utils_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_REDUCE_UTILS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_REDUCE_UTILS_H_
#include <stdint.h>
#include <algorithm>
#include <cstring>
namespace tflite {
namespace reduce_utils {
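// Removes size-1 dimensions from shape_out in place, dropping any reduction
// axis that pointed at a removed dimension and shifting the remaining axes
// down to match.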
inline void RemoveSize1Dims(int* shape_out, int& out_num_dims, int* axis_out,
int& out_num_axis) {
for (int64_t i = 0; i < out_num_dims;) {
if (shape_out[i] == 1) {
for (int64_t j = i + 1; j < out_num_dims; ++j) {
shape_out[j - 1] = shape_out[j];
}
for (int64_t j = 0; j < out_num_axis; ++j) {
if (axis_out[j] == i) {
for (int64_t k = j + 1; k < out_num_axis; ++k) {
axis_out[k - 1] = axis_out[k];
}
out_num_axis -= 1;
break;
}
}
for (int64_t j = 0; j < out_num_axis; ++j) {
if (axis_out[j] > i) {
axis_out[j] -= 1;
}
}
--out_num_dims;
} else {
++i;
}
}
}
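// Canonicalizes the reduction request: maps negative axes to their positive
// equivalents (returning false if any axis is out of range), drops
// duplicates, sorts the axes, strips size-1 dimensions, and finally folds
// adjacent dimensions that are either all reduced or all kept. For example,
// shape {2, 3, 4, 5} with axes {0, 1, 2} resolves to shape {24, 5} with
// axis {0}.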
inline bool ResolveAxis(const int num_dims, const int* axis,
const int64_t num_axis, int* axis_out,
int& out_num_axis, const int* shape_in, int* shape_out,
int& out_num_dims) {
if (num_dims == 0) {
out_num_axis = 0;
out_num_dims = 0;
return true;
}
out_num_axis = 0;
out_num_dims = num_dims;
for (int64_t idx = 0; idx < num_axis; ++idx) {
int current = axis[idx] < 0 ? (axis[idx] + num_dims) : axis[idx];
if (current < 0 || current >= num_dims) {
return false;
}
bool is_dup = false;
for (int j = 0; j < out_num_axis; ++j) {
if (axis_out[j] == current) {
is_dup = true;
break;
}
}
if (!is_dup) {
axis_out[out_num_axis] = current;
out_num_axis += 1;
}
}
memcpy(shape_out, shape_in, num_dims * sizeof(int));
std::sort(&axis_out[0], &axis_out[out_num_axis]);
RemoveSize1Dims(shape_out, out_num_dims, axis_out, out_num_axis);
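  // Walk the dimensions right to left, merging each dimension into its right
  // neighbor whenever both are reduced or both are kept, so reduction kernels
  // see the smallest equivalent shape.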
if (out_num_axis > 0) {
int64_t j = out_num_axis - 1;
bool previous_here = (axis_out[j] == out_num_dims - 1);
if (previous_here) {
j -= 1;
}
for (int64_t i = out_num_dims - 2; i >= 0; --i) {
bool current_here = j >= 0 ? (axis_out[j] == i) : false;
if (current_here == previous_here) {
shape_out[i] *= shape_out[i + 1];
for (int64_t k = i + 1; k + 1 < out_num_dims; ++k) {
shape_out[k] = shape_out[k + 1];
}
for (int64_t k = 0; k < out_num_axis; ++k) {
if (axis_out[k] > i) {
axis_out[k] -= 1;
}
}
if (current_here) {
for (int64_t k = j + 1; k + 1 < out_num_axis; ++k) {
axis_out[k] = axis_out[k + 1];
}
out_num_axis -= 1;
}
out_num_dims -= 1;
}
if (current_here) {
j -= 1;
}
previous_here = current_here;
}
}
return true;
}
}
}
#endif | #include "tensorflow/lite/kernels/internal/optimized/reduce_utils.h"
#include <gmock/gmock.h>
namespace tflite {
namespace reduce_utils {
namespace {
using ::testing::ElementsAreArray;
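// Runs ResolveAxis on (axis_in, shape_in) and verifies that the resolved
// dimension count, axis count, shape, and axes all match the expectations.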
void TestFunction(const std::vector<int>& axis_in,
const std::vector<int>& shape_in,
const std::vector<int>& expected_axis_out,
const std::vector<int>& expected_shape_out) {
int num_dims = shape_in.size();
int expected_out_num_dims = expected_shape_out.size();
int actual_out_num_dims;
int expected_out_num_axis = expected_axis_out.size();
int actual_out_num_axis;
std::vector<int> actual_shape_out(num_dims);
std::vector<int> actual_axis_out(num_dims);
ResolveAxis(shape_in.size(), axis_in.data(), axis_in.size(),
actual_axis_out.data(), actual_out_num_axis, shape_in.data(),
actual_shape_out.data(), actual_out_num_dims);
EXPECT_EQ(expected_out_num_dims, actual_out_num_dims);
EXPECT_EQ(expected_out_num_axis, actual_out_num_axis);
EXPECT_THAT(expected_shape_out,
ElementsAreArray(actual_shape_out.data(), expected_out_num_dims));
EXPECT_THAT(expected_axis_out,
ElementsAreArray(actual_axis_out.data(), expected_out_num_axis));
}
TEST(ResolveAxisTest, Flatten_0_1_2) {
const std::vector<int> axis_in = {0, 1, 2};
const std::vector<int> shape_in = {2, 3, 4, 5};
const std::vector<int> expected_shape_out{24, 5};
const std::vector<int> expected_axis_out{0};
TestFunction(axis_in, shape_in, expected_axis_out, expected_shape_out);
}
TEST(ResolveAxisTest, Flatten_0_1_2_3) {
const std::vector<int> axis_in = {3, 2};
const std::vector<int> shape_in = {2, 3, 4, 5};
const std::vector<int> expected_shape_out{6, 20};
const std::vector<int> expected_axis_out{1};
TestFunction(axis_in, shape_in, expected_axis_out, expected_shape_out);
}
TEST(ResolveAxisTest, ZeroDims) {
const std::vector<int> axis_in = {};
const std::vector<int> shape_in = {};
const std::vector<int> expected_shape_out{};
const std::vector<int> expected_axis_out{};
TestFunction(axis_in, shape_in, expected_axis_out, expected_shape_out);
}
TEST(ResolveAxisTest, DoNothing) {
const std::vector<int> axis_in = {0};
const std::vector<int> shape_in = {4, 5};
const std::vector<int> expected_shape_out{4, 5};
const std::vector<int> expected_axis_out{0};
TestFunction(axis_in, shape_in, expected_axis_out, expected_shape_out);
}
TEST(ResolveAxisTest, NegativeAxis) {
const std::vector<int> axis_in = {-2};
const std::vector<int> shape_in = {4, 3};
const std::vector<int> expected_shape_out{4, 3};
const std::vector<int> expected_axis_out{0};
TestFunction(axis_in, shape_in, expected_axis_out, expected_shape_out);
}
TEST(ResolveAxisTest, NegativeAxisFold) {
const std::vector<int> axis_in = {-1};
const std::vector<int> shape_in = {4, 3, 5};
const std::vector<int> expected_shape_out{12, 5};
const std::vector<int> expected_axis_out{1};
TestFunction(axis_in, shape_in, expected_axis_out, expected_shape_out);
}
TEST(ResolveAxisTest, DuplicateAxis) {
const std::vector<int> axis_in = {2, 1, 2, 1, 2, 1};
const std::vector<int> shape_in = {4, 3, 2};
const std::vector<int> expected_shape_out{4, 6};
const std::vector<int> expected_axis_out{1};
TestFunction(axis_in, shape_in, expected_axis_out, expected_shape_out);
}
TEST(ResolveAxisTest, DuplicateNegativeAxis) {
const std::vector<int> axis_in = {2, -1, -2, -1, 2, 1};
const std::vector<int> shape_in = {4, 3, 2};
const std::vector<int> expected_shape_out{4, 6};
const std::vector<int> expected_axis_out{1};
TestFunction(axis_in, shape_in, expected_axis_out, expected_shape_out);
}
TEST(ResolveAxisTest, RemoveSize1Dim) {
const std::vector<int> axis_in = {0};
const std::vector<int> shape_in = {1, 4, 3, 1};
const std::vector<int> expected_shape_out{4, 3};
const std::vector<int> expected_axis_out{};
TestFunction(axis_in, shape_in, expected_axis_out, expected_shape_out);
}
TEST(ResolveAxisTest, OneSize1DimToScalar) {
const std::vector<int> axis_in = {0};
const std::vector<int> shape_in = {1};
const std::vector<int> expected_shape_out{};
const std::vector<int> expected_axis_out{};
TestFunction(axis_in, shape_in, expected_axis_out, expected_shape_out);
}
TEST(ResolveAxisTest, InterleavedSize1Dim) {
const std::vector<int> axis_in = {1, 3};
const std::vector<int> shape_in = {1, 2, 1, 4, 1, 7};
const std::vector<int> expected_shape_out{8, 7};
const std::vector<int> expected_axis_out{0};
TestFunction(axis_in, shape_in, expected_axis_out, expected_shape_out);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/optimized/reduce_utils.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/optimized/reduce_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d346eaeb-6eef-4e54-a536-922f731fe6d3 | cpp | google/googletest | gmock-more-actions | googlemock/include/gmock/gmock-more-actions.h | googlemock/test/gmock-more-actions_test.cc | #ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_MORE_ACTIONS_H_
#define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_MORE_ACTIONS_H_
#include <memory>
#include <utility>
#include "gmock/gmock-actions.h"
#include "gmock/internal/gmock-port.h"
#include "gmock/internal/custom/gmock-generated-actions.h"
#define GMOCK_INTERNAL_DECL_HAS_1_TEMPLATE_PARAMS(kind0, name0) kind0 name0
#define GMOCK_INTERNAL_DECL_HAS_2_TEMPLATE_PARAMS(kind0, name0, kind1, name1) \
kind0 name0, kind1 name1
#define GMOCK_INTERNAL_DECL_HAS_3_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
kind2, name2) \
kind0 name0, kind1 name1, kind2 name2
#define GMOCK_INTERNAL_DECL_HAS_4_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
kind2, name2, kind3, name3) \
kind0 name0, kind1 name1, kind2 name2, kind3 name3
#define GMOCK_INTERNAL_DECL_HAS_5_TEMPLATE_PARAMS( \
kind0, name0, kind1, name1, kind2, name2, kind3, name3, kind4, name4) \
kind0 name0, kind1 name1, kind2 name2, kind3 name3, kind4 name4
#define GMOCK_INTERNAL_DECL_HAS_6_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
kind2, name2, kind3, name3, \
kind4, name4, kind5, name5) \
kind0 name0, kind1 name1, kind2 name2, kind3 name3, kind4 name4, kind5 name5
#define GMOCK_INTERNAL_DECL_HAS_7_TEMPLATE_PARAMS( \
kind0, name0, kind1, name1, kind2, name2, kind3, name3, kind4, name4, \
kind5, name5, kind6, name6) \
kind0 name0, kind1 name1, kind2 name2, kind3 name3, kind4 name4, \
kind5 name5, kind6 name6
#define GMOCK_INTERNAL_DECL_HAS_8_TEMPLATE_PARAMS( \
kind0, name0, kind1, name1, kind2, name2, kind3, name3, kind4, name4, \
kind5, name5, kind6, name6, kind7, name7) \
kind0 name0, kind1 name1, kind2 name2, kind3 name3, kind4 name4, \
kind5 name5, kind6 name6, kind7 name7
#define GMOCK_INTERNAL_DECL_HAS_9_TEMPLATE_PARAMS( \
kind0, name0, kind1, name1, kind2, name2, kind3, name3, kind4, name4, \
kind5, name5, kind6, name6, kind7, name7, kind8, name8) \
kind0 name0, kind1 name1, kind2 name2, kind3 name3, kind4 name4, \
kind5 name5, kind6 name6, kind7 name7, kind8 name8
#define GMOCK_INTERNAL_DECL_HAS_10_TEMPLATE_PARAMS( \
kind0, name0, kind1, name1, kind2, name2, kind3, name3, kind4, name4, \
kind5, name5, kind6, name6, kind7, name7, kind8, name8, kind9, name9) \
kind0 name0, kind1 name1, kind2 name2, kind3 name3, kind4 name4, \
kind5 name5, kind6 name6, kind7 name7, kind8 name8, kind9 name9
#define GMOCK_INTERNAL_LIST_HAS_1_TEMPLATE_PARAMS(kind0, name0) name0
#define GMOCK_INTERNAL_LIST_HAS_2_TEMPLATE_PARAMS(kind0, name0, kind1, name1) \
name0, name1
#define GMOCK_INTERNAL_LIST_HAS_3_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
kind2, name2) \
name0, name1, name2
#define GMOCK_INTERNAL_LIST_HAS_4_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
kind2, name2, kind3, name3) \
name0, name1, name2, name3
#define GMOCK_INTERNAL_LIST_HAS_5_TEMPLATE_PARAMS( \
kind0, name0, kind1, name1, kind2, name2, kind3, name3, kind4, name4) \
name0, name1, name2, name3, name4
#define GMOCK_INTERNAL_LIST_HAS_6_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
kind2, name2, kind3, name3, \
kind4, name4, kind5, name5) \
name0, name1, name2, name3, name4, name5
#define GMOCK_INTERNAL_LIST_HAS_7_TEMPLATE_PARAMS( \
kind0, name0, kind1, name1, kind2, name2, kind3, name3, kind4, name4, \
kind5, name5, kind6, name6) \
name0, name1, name2, name3, name4, name5, name6
#define GMOCK_INTERNAL_LIST_HAS_8_TEMPLATE_PARAMS( \
kind0, name0, kind1, name1, kind2, name2, kind3, name3, kind4, name4, \
kind5, name5, kind6, name6, kind7, name7) \
name0, name1, name2, name3, name4, name5, name6, name7
#define GMOCK_INTERNAL_LIST_HAS_9_TEMPLATE_PARAMS( \
kind0, name0, kind1, name1, kind2, name2, kind3, name3, kind4, name4, \
kind5, name5, kind6, name6, kind7, name7, kind8, name8) \
name0, name1, name2, name3, name4, name5, name6, name7, name8
#define GMOCK_INTERNAL_LIST_HAS_10_TEMPLATE_PARAMS( \
kind0, name0, kind1, name1, kind2, name2, kind3, name3, kind4, name4, \
kind5, name5, kind6, name6, kind7, name7, kind8, name8, kind9, name9) \
name0, name1, name2, name3, name4, name5, name6, name7, name8, name9
#define GMOCK_INTERNAL_DECL_TYPE_AND_0_VALUE_PARAMS()
#define GMOCK_INTERNAL_DECL_TYPE_AND_1_VALUE_PARAMS(p0) , typename p0##_type
#define GMOCK_INTERNAL_DECL_TYPE_AND_2_VALUE_PARAMS(p0, p1) \
, typename p0##_type, typename p1##_type
#define GMOCK_INTERNAL_DECL_TYPE_AND_3_VALUE_PARAMS(p0, p1, p2) \
, typename p0##_type, typename p1##_type, typename p2##_type
#define GMOCK_INTERNAL_DECL_TYPE_AND_4_VALUE_PARAMS(p0, p1, p2, p3) \
, typename p0##_type, typename p1##_type, typename p2##_type, \
typename p3##_type
#define GMOCK_INTERNAL_DECL_TYPE_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) \
, typename p0##_type, typename p1##_type, typename p2##_type, \
typename p3##_type, typename p4##_type
#define GMOCK_INTERNAL_DECL_TYPE_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) \
, typename p0##_type, typename p1##_type, typename p2##_type, \
typename p3##_type, typename p4##_type, typename p5##_type
#define GMOCK_INTERNAL_DECL_TYPE_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
p6) \
, typename p0##_type, typename p1##_type, typename p2##_type, \
typename p3##_type, typename p4##_type, typename p5##_type, \
typename p6##_type
#define GMOCK_INTERNAL_DECL_TYPE_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
p6, p7) \
, typename p0##_type, typename p1##_type, typename p2##_type, \
typename p3##_type, typename p4##_type, typename p5##_type, \
typename p6##_type, typename p7##_type
#define GMOCK_INTERNAL_DECL_TYPE_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
p6, p7, p8) \
, typename p0##_type, typename p1##_type, typename p2##_type, \
typename p3##_type, typename p4##_type, typename p5##_type, \
typename p6##_type, typename p7##_type, typename p8##_type
#define GMOCK_INTERNAL_DECL_TYPE_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
p6, p7, p8, p9) \
, typename p0##_type, typename p1##_type, typename p2##_type, \
typename p3##_type, typename p4##_type, typename p5##_type, \
typename p6##_type, typename p7##_type, typename p8##_type, \
typename p9##_type
#define GMOCK_INTERNAL_INIT_AND_0_VALUE_PARAMS() ()
#define GMOCK_INTERNAL_INIT_AND_1_VALUE_PARAMS(p0) \
(p0##_type gmock_p0) : p0(::std::move(gmock_p0))
#define GMOCK_INTERNAL_INIT_AND_2_VALUE_PARAMS(p0, p1) \
(p0##_type gmock_p0, p1##_type gmock_p1) \
: p0(::std::move(gmock_p0)), p1(::std::move(gmock_p1))
#define GMOCK_INTERNAL_INIT_AND_3_VALUE_PARAMS(p0, p1, p2) \
(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2) \
: p0(::std::move(gmock_p0)), \
p1(::std::move(gmock_p1)), \
p2(::std::move(gmock_p2))
#define GMOCK_INTERNAL_INIT_AND_4_VALUE_PARAMS(p0, p1, p2, p3) \
(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
p3##_type gmock_p3) \
: p0(::std::move(gmock_p0)), \
p1(::std::move(gmock_p1)), \
p2(::std::move(gmock_p2)), \
p3(::std::move(gmock_p3))
#define GMOCK_INTERNAL_INIT_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) \
(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
p3##_type gmock_p3, p4##_type gmock_p4) \
: p0(::std::move(gmock_p0)), \
p1(::std::move(gmock_p1)), \
p2(::std::move(gmock_p2)), \
p3(::std::move(gmock_p3)), \
p4(::std::move(gmock_p4))
#define GMOCK_INTERNAL_INIT_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) \
(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5) \
: p0(::std::move(gmock_p0)), \
p1(::std::move(gmock_p1)), \
p2(::std::move(gmock_p2)), \
p3(::std::move(gmock_p3)), \
p4(::std::move(gmock_p4)), \
p5(::std::move(gmock_p5))
#define GMOCK_INTERNAL_INIT_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6) \
(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
p6##_type gmock_p6) \
: p0(::std::move(gmock_p0)), \
p1(::std::move(gmock_p1)), \
p2(::std::move(gmock_p2)), \
p3(::std::move(gmock_p3)), \
p4(::std::move(gmock_p4)), \
p5(::std::move(gmock_p5)), \
p6(::std::move(gmock_p6))
#define GMOCK_INTERNAL_INIT_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7) \
(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
p6##_type gmock_p6, p7##_type gmock_p7) \
: p0(::std::move(gmock_p0)), \
p1(::std::move(gmock_p1)), \
p2(::std::move(gmock_p2)), \
p3(::std::move(gmock_p3)), \
p4(::std::move(gmock_p4)), \
p5(::std::move(gmock_p5)), \
p6(::std::move(gmock_p6)), \
p7(::std::move(gmock_p7))
#define GMOCK_INTERNAL_INIT_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, \
p8) \
(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
p6##_type gmock_p6, p7##_type gmock_p7, p8##_type gmock_p8) \
: p0(::std::move(gmock_p0)), \
p1(::std::move(gmock_p1)), \
p2(::std::move(gmock_p2)), \
p3(::std::move(gmock_p3)), \
p4(::std::move(gmock_p4)), \
p5(::std::move(gmock_p5)), \
p6(::std::move(gmock_p6)), \
p7(::std::move(gmock_p7)), \
p8(::std::move(gmock_p8))
#define GMOCK_INTERNAL_INIT_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
p7, p8, p9) \
(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
p6##_type gmock_p6, p7##_type gmock_p7, p8##_type gmock_p8, \
p9##_type gmock_p9) \
: p0(::std::move(gmock_p0)), \
p1(::std::move(gmock_p1)), \
p2(::std::move(gmock_p2)), \
p3(::std::move(gmock_p3)), \
p4(::std::move(gmock_p4)), \
p5(::std::move(gmock_p5)), \
p6(::std::move(gmock_p6)), \
p7(::std::move(gmock_p7)), \
p8(::std::move(gmock_p8)), \
p9(::std::move(gmock_p9))
#define GMOCK_INTERNAL_DEFN_COPY_AND_0_VALUE_PARAMS() \
{}
#define GMOCK_INTERNAL_DEFN_COPY_AND_1_VALUE_PARAMS(...) = default;
#define GMOCK_INTERNAL_DEFN_COPY_AND_2_VALUE_PARAMS(...) = default;
#define GMOCK_INTERNAL_DEFN_COPY_AND_3_VALUE_PARAMS(...) = default;
#define GMOCK_INTERNAL_DEFN_COPY_AND_4_VALUE_PARAMS(...) = default;
#define GMOCK_INTERNAL_DEFN_COPY_AND_5_VALUE_PARAMS(...) = default;
#define GMOCK_INTERNAL_DEFN_COPY_AND_6_VALUE_PARAMS(...) = default;
#define GMOCK_INTERNAL_DEFN_COPY_AND_7_VALUE_PARAMS(...) = default;
#define GMOCK_INTERNAL_DEFN_COPY_AND_8_VALUE_PARAMS(...) = default;
#define GMOCK_INTERNAL_DEFN_COPY_AND_9_VALUE_PARAMS(...) = default;
#define GMOCK_INTERNAL_DEFN_COPY_AND_10_VALUE_PARAMS(...) = default;
#define GMOCK_INTERNAL_DEFN_AND_0_VALUE_PARAMS()
#define GMOCK_INTERNAL_DEFN_AND_1_VALUE_PARAMS(p0) p0##_type p0;
#define GMOCK_INTERNAL_DEFN_AND_2_VALUE_PARAMS(p0, p1) \
p0##_type p0; \
p1##_type p1;
#define GMOCK_INTERNAL_DEFN_AND_3_VALUE_PARAMS(p0, p1, p2) \
p0##_type p0; \
p1##_type p1; \
p2##_type p2;
#define GMOCK_INTERNAL_DEFN_AND_4_VALUE_PARAMS(p0, p1, p2, p3) \
p0##_type p0; \
p1##_type p1; \
p2##_type p2; \
p3##_type p3;
#define GMOCK_INTERNAL_DEFN_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) \
p0##_type p0; \
p1##_type p1; \
p2##_type p2; \
p3##_type p3; \
p4##_type p4;
#define GMOCK_INTERNAL_DEFN_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) \
p0##_type p0; \
p1##_type p1; \
p2##_type p2; \
p3##_type p3; \
p4##_type p4; \
p5##_type p5;
#define GMOCK_INTERNAL_DEFN_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6) \
p0##_type p0; \
p1##_type p1; \
p2##_type p2; \
p3##_type p3; \
p4##_type p4; \
p5##_type p5; \
p6##_type p6;
#define GMOCK_INTERNAL_DEFN_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7) \
p0##_type p0; \
p1##_type p1; \
p2##_type p2; \
p3##_type p3; \
p4##_type p4; \
p5##_type p5; \
p6##_type p6; \
p7##_type p7;
#define GMOCK_INTERNAL_DEFN_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, \
p8) \
p0##_type p0; \
p1##_type p1; \
p2##_type p2; \
p3##_type p3; \
p4##_type p4; \
p5##_type p5; \
p6##_type p6; \
p7##_type p7; \
p8##_type p8;
#define GMOCK_INTERNAL_DEFN_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
p7, p8, p9) \
p0##_type p0; \
p1##_type p1; \
p2##_type p2; \
p3##_type p3; \
p4##_type p4; \
p5##_type p5; \
p6##_type p6; \
p7##_type p7; \
p8##_type p8; \
p9##_type p9;
#define GMOCK_INTERNAL_LIST_AND_0_VALUE_PARAMS()
#define GMOCK_INTERNAL_LIST_AND_1_VALUE_PARAMS(p0) p0
#define GMOCK_INTERNAL_LIST_AND_2_VALUE_PARAMS(p0, p1) p0, p1
#define GMOCK_INTERNAL_LIST_AND_3_VALUE_PARAMS(p0, p1, p2) p0, p1, p2
#define GMOCK_INTERNAL_LIST_AND_4_VALUE_PARAMS(p0, p1, p2, p3) p0, p1, p2, p3
#define GMOCK_INTERNAL_LIST_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) \
p0, p1, p2, p3, p4
#define GMOCK_INTERNAL_LIST_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) \
p0, p1, p2, p3, p4, p5
#define GMOCK_INTERNAL_LIST_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6) \
p0, p1, p2, p3, p4, p5, p6
#define GMOCK_INTERNAL_LIST_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7) \
p0, p1, p2, p3, p4, p5, p6, p7
#define GMOCK_INTERNAL_LIST_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, \
p8) \
p0, p1, p2, p3, p4, p5, p6, p7, p8
#define GMOCK_INTERNAL_LIST_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
p7, p8, p9) \
p0, p1, p2, p3, p4, p5, p6, p7, p8, p9
#define GMOCK_INTERNAL_LIST_TYPE_AND_0_VALUE_PARAMS()
#define GMOCK_INTERNAL_LIST_TYPE_AND_1_VALUE_PARAMS(p0) , p0##_type
#define GMOCK_INTERNAL_LIST_TYPE_AND_2_VALUE_PARAMS(p0, p1) \
, p0##_type, p1##_type
#define GMOCK_INTERNAL_LIST_TYPE_AND_3_VALUE_PARAMS(p0, p1, p2) \
, p0##_type, p1##_type, p2##_type
#define GMOCK_INTERNAL_LIST_TYPE_AND_4_VALUE_PARAMS(p0, p1, p2, p3) \
, p0##_type, p1##_type, p2##_type, p3##_type
#define GMOCK_INTERNAL_LIST_TYPE_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) \
, p0##_type, p1##_type, p2##_type, p3##_type, p4##_type
#define GMOCK_INTERNAL_LIST_TYPE_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) \
, p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, p5##_type
#define GMOCK_INTERNAL_LIST_TYPE_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
p6) \
, p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, p5##_type, p6##_type
#define GMOCK_INTERNAL_LIST_TYPE_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
p6, p7) \
, p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, p5##_type, \
p6##_type, p7##_type
#define GMOCK_INTERNAL_LIST_TYPE_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
p6, p7, p8) \
, p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, p5##_type, \
p6##_type, p7##_type, p8##_type
#define GMOCK_INTERNAL_LIST_TYPE_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
p6, p7, p8, p9) \
, p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, p5##_type, \
p6##_type, p7##_type, p8##_type, p9##_type
#define GMOCK_INTERNAL_DECL_AND_0_VALUE_PARAMS()
#define GMOCK_INTERNAL_DECL_AND_1_VALUE_PARAMS(p0) p0##_type p0
#define GMOCK_INTERNAL_DECL_AND_2_VALUE_PARAMS(p0, p1) \
p0##_type p0, p1##_type p1
#define GMOCK_INTERNAL_DECL_AND_3_VALUE_PARAMS(p0, p1, p2) \
p0##_type p0, p1##_type p1, p2##_type p2
#define GMOCK_INTERNAL_DECL_AND_4_VALUE_PARAMS(p0, p1, p2, p3) \
p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3
#define GMOCK_INTERNAL_DECL_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) \
p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4
#define GMOCK_INTERNAL_DECL_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) \
p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \
p5##_type p5
#define GMOCK_INTERNAL_DECL_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6) \
p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \
p5##_type p5, p6##_type p6
#define GMOCK_INTERNAL_DECL_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7) \
p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \
p5##_type p5, p6##_type p6, p7##_type p7
#define GMOCK_INTERNAL_DECL_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, \
p8) \
p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \
p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8
#define GMOCK_INTERNAL_DECL_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
p7, p8, p9) \
p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \
p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8, p9##_type p9
#define GMOCK_INTERNAL_COUNT_AND_0_VALUE_PARAMS()
#define GMOCK_INTERNAL_COUNT_AND_1_VALUE_PARAMS(p0) P
#define GMOCK_INTERNAL_COUNT_AND_2_VALUE_PARAMS(p0, p1) P2
#define GMOCK_INTERNAL_COUNT_AND_3_VALUE_PARAMS(p0, p1, p2) P3
#define GMOCK_INTERNAL_COUNT_AND_4_VALUE_PARAMS(p0, p1, p2, p3) P4
#define GMOCK_INTERNAL_COUNT_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) P5
#define GMOCK_INTERNAL_COUNT_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) P6
#define GMOCK_INTERNAL_COUNT_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6) P7
#define GMOCK_INTERNAL_COUNT_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
p7) \
P8
#define GMOCK_INTERNAL_COUNT_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
P9
#define GMOCK_INTERNAL_COUNT_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
p7, p8, p9) \
P10
#define GMOCK_ACTION_CLASS_(name, value_params) \
GTEST_CONCAT_TOKEN_(name##Action, GMOCK_INTERNAL_COUNT_##value_params)
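// ACTION_TEMPLATE defines an action template that takes both explicit
// template parameters and value parameters: `template_params` must be
// HAS_m_TEMPLATE_PARAMS(...) and `value_params` must be
// AND_n_VALUE_PARAMS(...). For example:
//
//   ACTION_TEMPLATE(DuplicateArg,
//                   HAS_2_TEMPLATE_PARAMS(int, k, typename, T),
//                   AND_1_VALUE_PARAMS(output)) {
//     *output = T(::std::get<k>(args));
//   }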
#define ACTION_TEMPLATE(name, template_params, value_params) \
template <GMOCK_INTERNAL_DECL_##template_params \
GMOCK_INTERNAL_DECL_TYPE_##value_params> \
class GMOCK_ACTION_CLASS_(name, value_params) { \
public: \
explicit GMOCK_ACTION_CLASS_(name, value_params)( \
GMOCK_INTERNAL_DECL_##value_params) \
GMOCK_PP_IF(GMOCK_PP_IS_EMPTY(GMOCK_INTERNAL_COUNT_##value_params), \
= default; \
, \
: impl_(std::make_shared<gmock_Impl>( \
GMOCK_INTERNAL_LIST_##value_params)){}) \
GMOCK_ACTION_CLASS_(name, value_params)(const GMOCK_ACTION_CLASS_( \
name, value_params) &) noexcept GMOCK_INTERNAL_DEFN_COPY_ \
##value_params \
GMOCK_ACTION_CLASS_(name, value_params)(GMOCK_ACTION_CLASS_( \
name, value_params) &&) noexcept GMOCK_INTERNAL_DEFN_COPY_ \
##value_params template <typename F> \
operator ::testing::Action<F>() const { \
return GMOCK_PP_IF( \
GMOCK_PP_IS_EMPTY(GMOCK_INTERNAL_COUNT_##value_params), \
(::testing::internal::MakeAction<F, gmock_Impl>()), \
(::testing::internal::MakeAction<F>(impl_))); \
} \
\
private: \
class gmock_Impl { \
public: \
explicit gmock_Impl GMOCK_INTERNAL_INIT_##value_params {} \
template <typename function_type, typename return_type, \
typename args_type, GMOCK_ACTION_TEMPLATE_ARGS_NAMES_> \
return_type gmock_PerformImpl(GMOCK_ACTION_ARG_TYPES_AND_NAMES_) const; \
GMOCK_INTERNAL_DEFN_##value_params \
}; \
GMOCK_PP_IF(GMOCK_PP_IS_EMPTY(GMOCK_INTERNAL_COUNT_##value_params), , \
std::shared_ptr<const gmock_Impl> impl_;) \
}; \
template <GMOCK_INTERNAL_DECL_##template_params \
GMOCK_INTERNAL_DECL_TYPE_##value_params> \
GMOCK_ACTION_CLASS_( \
name, value_params)<GMOCK_INTERNAL_LIST_##template_params \
GMOCK_INTERNAL_LIST_TYPE_##value_params> \
name(GMOCK_INTERNAL_DECL_##value_params) GTEST_MUST_USE_RESULT_; \
template <GMOCK_INTERNAL_DECL_##template_params \
GMOCK_INTERNAL_DECL_TYPE_##value_params> \
inline GMOCK_ACTION_CLASS_( \
name, value_params)<GMOCK_INTERNAL_LIST_##template_params \
GMOCK_INTERNAL_LIST_TYPE_##value_params> \
name(GMOCK_INTERNAL_DECL_##value_params) { \
return GMOCK_ACTION_CLASS_( \
name, value_params)<GMOCK_INTERNAL_LIST_##template_params \
GMOCK_INTERNAL_LIST_TYPE_##value_params>( \
GMOCK_INTERNAL_LIST_##value_params); \
} \
template <GMOCK_INTERNAL_DECL_##template_params \
GMOCK_INTERNAL_DECL_TYPE_##value_params> \
template <typename function_type, typename return_type, typename args_type, \
GMOCK_ACTION_TEMPLATE_ARGS_NAMES_> \
return_type GMOCK_ACTION_CLASS_( \
name, value_params)<GMOCK_INTERNAL_LIST_##template_params \
GMOCK_INTERNAL_LIST_TYPE_##value_params>:: \
gmock_Impl::gmock_PerformImpl(GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) \
const
namespace testing {
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4100)
namespace internal {
template <typename F, typename... Args>
auto InvokeArgument(F &&f,
Args... args) -> decltype(std::forward<F>(f)(args...)) {
return std::forward<F>(f)(args...);
}
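// Implements the InvokeArgument action: when performed, calls the `index`-th
// argument of the mock function (which must be callable) with the bound
// `Params`, and returns whatever that call returns.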
template <std::size_t index, typename... Params>
struct InvokeArgumentAction {
template <typename... Args,
typename = typename std::enable_if<(index < sizeof...(Args))>::type>
auto operator()(Args &&...args) const
-> decltype(internal::InvokeArgument(
std::get<index>(std::forward_as_tuple(std::forward<Args>(args)...)),
std::declval<const Params &>()...)) {
internal::FlatTuple<Args &&...> args_tuple(FlatTupleConstructTag{},
std::forward<Args>(args)...);
return params.Apply([&](const Params &...unpacked_params) {
auto &&callable = std::move(args_tuple.template Get<index>());
return internal::InvokeArgument(
std::forward<decltype(callable)>(callable), unpacked_params...);
});
}
internal::FlatTuple<Params...> params;
};
}
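// Creates an action that invokes the `index`-th (0-based) argument of the
// mock function with the given parameters; that argument must be callable
// with them. A typical use, assuming a mock method whose first argument is
// a callback taking an int:
//
//   EXPECT_CALL(mock, DoThis(_)).WillOnce(InvokeArgument<0>(5));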
template <std::size_t index, typename... Params>
internal::InvokeArgumentAction<index, typename std::decay<Params>::type...>
InvokeArgument(Params &&...params) {
return {internal::FlatTuple<typename std::decay<Params>::type...>(
internal::FlatTupleConstructTag{}, std::forward<Params>(params)...)};
}
GTEST_DISABLE_MSC_WARNINGS_POP_()
}
#endif | #include "gmock/gmock-more-actions.h"
#include <algorithm>
#include <functional>
#include <iterator>
#include <memory>
#include <sstream>
#include <string>
#include <tuple>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest-spi.h"
#include "gtest/gtest.h"
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4577)
namespace testing {
namespace gmock_more_actions_test {
using ::std::plus;
using ::std::string;
using testing::Action;
using testing::DeleteArg;
using testing::Invoke;
using testing::ReturnArg;
using testing::ReturnPointee;
using testing::SaveArg;
using testing::SaveArgPointee;
using testing::SetArgReferee;
using testing::Unused;
using testing::WithArg;
using testing::WithoutArgs;
inline short Short(short n) { return n; }
inline char Char(char ch) { return ch; }
int Nullary() { return 1; }
bool g_done = false;
bool Unary(int x) { return x < 0; }
bool ByConstRef(const std::string& s) { return s == "Hi"; }
const double g_double = 0;
bool ReferencesGlobalDouble(const double& x) { return &x == &g_double; }
struct UnaryFunctor {
int operator()(bool x) { return x ? 1 : -1; }
};
struct UnaryMoveOnlyFunctor : UnaryFunctor {
UnaryMoveOnlyFunctor() = default;
UnaryMoveOnlyFunctor(const UnaryMoveOnlyFunctor&) = delete;
UnaryMoveOnlyFunctor(UnaryMoveOnlyFunctor&&) = default;
};
struct OneShotUnaryFunctor {
int operator()(bool x) && { return x ? 1 : -1; }
};
const char* Binary(const char* input, short n) { return input + n; }
int Ternary(int x, char y, short z) { return x + y + z; }
int SumOf4(int a, int b, int c, int d) { return a + b + c + d; }
int SumOfFirst2(int a, int b, Unused, Unused) { return a + b; }
int SumOf5(int a, int b, int c, int d, int e) { return a + b + c + d + e; }
struct SumOf5Functor {
int operator()(int a, int b, int c, int d, int e) {
return a + b + c + d + e;
}
};
int SumOf6(int a, int b, int c, int d, int e, int f) {
return a + b + c + d + e + f;
}
struct SumOf6Functor {
int operator()(int a, int b, int c, int d, int e, int f) {
return a + b + c + d + e + f;
}
};
std::string Concat7(const char* s1, const char* s2, const char* s3,
const char* s4, const char* s5, const char* s6,
const char* s7) {
return std::string(s1) + s2 + s3 + s4 + s5 + s6 + s7;
}
std::string Concat8(const char* s1, const char* s2, const char* s3,
const char* s4, const char* s5, const char* s6,
const char* s7, const char* s8) {
return std::string(s1) + s2 + s3 + s4 + s5 + s6 + s7 + s8;
}
std::string Concat9(const char* s1, const char* s2, const char* s3,
const char* s4, const char* s5, const char* s6,
const char* s7, const char* s8, const char* s9) {
return std::string(s1) + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9;
}
std::string Concat10(const char* s1, const char* s2, const char* s3,
const char* s4, const char* s5, const char* s6,
const char* s7, const char* s8, const char* s9,
const char* s10) {
return std::string(s1) + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9 + s10;
}
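// A class whose member functions the Invoke(object, method) tests below call
// through mock actions.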
class Foo {
public:
Foo() : value_(123) {}
int Nullary() const { return value_; }
short Unary(long x) { return static_cast<short>(value_ + x); }
std::string Binary(const std::string& str, char c) const { return str + c; }
int Ternary(int x, bool y, char z) { return value_ + x + y * z; }
int SumOf4(int a, int b, int c, int d) const {
return a + b + c + d + value_;
}
int SumOfLast2(Unused, Unused, int a, int b) const { return a + b; }
int SumOf5(int a, int b, int c, int d, int e) { return a + b + c + d + e; }
int SumOf6(int a, int b, int c, int d, int e, int f) {
return a + b + c + d + e + f;
}
std::string Concat7(const char* s1, const char* s2, const char* s3,
const char* s4, const char* s5, const char* s6,
const char* s7) {
return std::string(s1) + s2 + s3 + s4 + s5 + s6 + s7;
}
std::string Concat8(const char* s1, const char* s2, const char* s3,
const char* s4, const char* s5, const char* s6,
const char* s7, const char* s8) {
return std::string(s1) + s2 + s3 + s4 + s5 + s6 + s7 + s8;
}
std::string Concat9(const char* s1, const char* s2, const char* s3,
const char* s4, const char* s5, const char* s6,
const char* s7, const char* s8, const char* s9) {
return std::string(s1) + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9;
}
std::string Concat10(const char* s1, const char* s2, const char* s3,
const char* s4, const char* s5, const char* s6,
const char* s7, const char* s8, const char* s9,
const char* s10) {
return std::string(s1) + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9 + s10;
}
private:
int value_;
};
TEST(InvokeTest, Nullary) {
Action<int()> a = Invoke(Nullary);
EXPECT_EQ(1, a.Perform(std::make_tuple()));
}
TEST(InvokeTest, Unary) {
Action<bool(int)> a = Invoke(Unary);
EXPECT_FALSE(a.Perform(std::make_tuple(1)));
EXPECT_TRUE(a.Perform(std::make_tuple(-1)));
}
TEST(InvokeTest, Binary) {
Action<const char*(const char*, short)> a = Invoke(Binary);
const char* p = "Hello";
EXPECT_EQ(p + 2, a.Perform(std::make_tuple(p, Short(2))));
}
TEST(InvokeTest, Ternary) {
Action<int(int, char, short)> a = Invoke(Ternary);
EXPECT_EQ(6, a.Perform(std::make_tuple(1, '\2', Short(3))));
}
TEST(InvokeTest, FunctionThatTakes4Arguments) {
Action<int(int, int, int, int)> a = Invoke(SumOf4);
EXPECT_EQ(1234, a.Perform(std::make_tuple(1000, 200, 30, 4)));
}
TEST(InvokeTest, FunctionThatTakes5Arguments) {
Action<int(int, int, int, int, int)> a = Invoke(SumOf5);
EXPECT_EQ(12345, a.Perform(std::make_tuple(10000, 2000, 300, 40, 5)));
}
TEST(InvokeTest, FunctionThatTakes6Arguments) {
Action<int(int, int, int, int, int, int)> a = Invoke(SumOf6);
EXPECT_EQ(123456,
a.Perform(std::make_tuple(100000, 20000, 3000, 400, 50, 6)));
}
inline const char* CharPtr(const char* s) { return s; }
TEST(InvokeTest, FunctionThatTakes7Arguments) {
Action<std::string(const char*, const char*, const char*, const char*,
const char*, const char*, const char*)>
a = Invoke(Concat7);
EXPECT_EQ("1234567",
a.Perform(std::make_tuple(CharPtr("1"), CharPtr("2"), CharPtr("3"),
CharPtr("4"), CharPtr("5"), CharPtr("6"),
CharPtr("7"))));
}
TEST(InvokeTest, FunctionThatTakes8Arguments) {
Action<std::string(const char*, const char*, const char*, const char*,
const char*, const char*, const char*, const char*)>
a = Invoke(Concat8);
EXPECT_EQ("12345678",
a.Perform(std::make_tuple(CharPtr("1"), CharPtr("2"), CharPtr("3"),
CharPtr("4"), CharPtr("5"), CharPtr("6"),
CharPtr("7"), CharPtr("8"))));
}
TEST(InvokeTest, FunctionThatTakes9Arguments) {
Action<std::string(const char*, const char*, const char*, const char*,
const char*, const char*, const char*, const char*,
const char*)>
a = Invoke(Concat9);
EXPECT_EQ("123456789", a.Perform(std::make_tuple(
CharPtr("1"), CharPtr("2"), CharPtr("3"),
CharPtr("4"), CharPtr("5"), CharPtr("6"),
CharPtr("7"), CharPtr("8"), CharPtr("9"))));
}
TEST(InvokeTest, FunctionThatTakes10Arguments) {
Action<std::string(const char*, const char*, const char*, const char*,
const char*, const char*, const char*, const char*,
const char*, const char*)>
a = Invoke(Concat10);
EXPECT_EQ("1234567890",
a.Perform(std::make_tuple(CharPtr("1"), CharPtr("2"), CharPtr("3"),
CharPtr("4"), CharPtr("5"), CharPtr("6"),
CharPtr("7"), CharPtr("8"), CharPtr("9"),
CharPtr("0"))));
}
TEST(InvokeTest, FunctionWithUnusedParameters) {
Action<int(int, int, double, const std::string&)> a1 = Invoke(SumOfFirst2);
std::tuple<int, int, double, std::string> dummy =
std::make_tuple(10, 2, 5.6, std::string("hi"));
EXPECT_EQ(12, a1.Perform(dummy));
Action<int(int, int, bool, int*)> a2 = Invoke(SumOfFirst2);
EXPECT_EQ(
23, a2.Perform(std::make_tuple(20, 3, true, static_cast<int*>(nullptr))));
}
TEST(InvokeTest, MethodWithUnusedParameters) {
Foo foo;
Action<int(std::string, bool, int, int)> a1 = Invoke(&foo, &Foo::SumOfLast2);
EXPECT_EQ(12, a1.Perform(std::make_tuple(CharPtr("hi"), true, 10, 2)));
Action<int(char, double, int, int)> a2 = Invoke(&foo, &Foo::SumOfLast2);
EXPECT_EQ(23, a2.Perform(std::make_tuple('a', 2.5, 20, 3)));
}
TEST(InvokeTest, Functor) {
Action<long(long, int)> a = Invoke(plus<long>());
EXPECT_EQ(3L, a.Perform(std::make_tuple(1, 2)));
}
TEST(InvokeTest, FunctionWithCompatibleType) {
Action<long(int, short, char, bool)> a = Invoke(SumOf4);
EXPECT_EQ(4321, a.Perform(std::make_tuple(4000, Short(300), Char(20), true)));
}
TEST(InvokeMethodTest, Nullary) {
Foo foo;
Action<int()> a = Invoke(&foo, &Foo::Nullary);
EXPECT_EQ(123, a.Perform(std::make_tuple()));
}
TEST(InvokeMethodTest, Unary) {
Foo foo;
Action<short(long)> a = Invoke(&foo, &Foo::Unary);
EXPECT_EQ(4123, a.Perform(std::make_tuple(4000)));
}
TEST(InvokeMethodTest, Binary) {
Foo foo;
Action<std::string(const std::string&, char)> a = Invoke(&foo, &Foo::Binary);
std::string s("Hell");
std::tuple<std::string, char> dummy = std::make_tuple(s, 'o');
EXPECT_EQ("Hello", a.Perform(dummy));
}
TEST(InvokeMethodTest, Ternary) {
Foo foo;
Action<int(int, bool, char)> a = Invoke(&foo, &Foo::Ternary);
EXPECT_EQ(1124, a.Perform(std::make_tuple(1000, true, Char(1))));
}
TEST(InvokeMethodTest, MethodThatTakes4Arguments) {
Foo foo;
Action<int(int, int, int, int)> a = Invoke(&foo, &Foo::SumOf4);
EXPECT_EQ(1357, a.Perform(std::make_tuple(1000, 200, 30, 4)));
}
TEST(InvokeMethodTest, MethodThatTakes5Arguments) {
Foo foo;
Action<int(int, int, int, int, int)> a =
Invoke(&foo, &Foo::SumOf5);
EXPECT_EQ(12345, a.Perform(std::make_tuple(10000, 2000, 300, 40, 5)));
}
TEST(InvokeMethodTest, MethodThatTakes6Arguments) {
Foo foo;
Action<int(int, int, int, int, int, int)> a =
Invoke(&foo, &Foo::SumOf6);
EXPECT_EQ(123456,
a.Perform(std::make_tuple(100000, 20000, 3000, 400, 50, 6)));
}
TEST(InvokeMethodTest, MethodThatTakes7Arguments) {
Foo foo;
Action<std::string(const char*, const char*, const char*, const char*,
const char*, const char*, const char*)>
a = Invoke(&foo, &Foo::Concat7);
EXPECT_EQ("1234567",
a.Perform(std::make_tuple(CharPtr("1"), CharPtr("2"), CharPtr("3"),
CharPtr("4"), CharPtr("5"), CharPtr("6"),
CharPtr("7"))));
}
TEST(InvokeMethodTest, MethodThatTakes8Arguments) {
Foo foo;
Action<std::string(const char*, const char*, const char*, const char*,
const char*, const char*, const char*, const char*)>
a = Invoke(&foo, &Foo::Concat8);
EXPECT_EQ("12345678",
a.Perform(std::make_tuple(CharPtr("1"), CharPtr("2"), CharPtr("3"),
CharPtr("4"), CharPtr("5"), CharPtr("6"),
CharPtr("7"), CharPtr("8"))));
}
TEST(InvokeMethodTest, MethodThatTakes9Arguments) {
Foo foo;
Action<std::string(const char*, const char*, const char*, const char*,
const char*, const char*, const char*, const char*,
const char*)>
a = Invoke(&foo, &Foo::Concat9);
EXPECT_EQ("123456789", a.Perform(std::make_tuple(
CharPtr("1"), CharPtr("2"), CharPtr("3"),
CharPtr("4"), CharPtr("5"), CharPtr("6"),
CharPtr("7"), CharPtr("8"), CharPtr("9"))));
}
TEST(InvokeMethodTest, MethodThatTakes10Arguments) {
Foo foo;
Action<std::string(const char*, const char*, const char*, const char*,
const char*, const char*, const char*, const char*,
const char*, const char*)>
a = Invoke(&foo, &Foo::Concat10);
EXPECT_EQ("1234567890",
a.Perform(std::make_tuple(CharPtr("1"), CharPtr("2"), CharPtr("3"),
CharPtr("4"), CharPtr("5"), CharPtr("6"),
CharPtr("7"), CharPtr("8"), CharPtr("9"),
CharPtr("0"))));
}
TEST(InvokeMethodTest, MethodWithCompatibleType) {
Foo foo;
Action<long(int, short, char, bool)> a =
Invoke(&foo, &Foo::SumOf4);
EXPECT_EQ(4444, a.Perform(std::make_tuple(4000, Short(300), Char(20), true)));
}
TEST(WithoutArgsTest, NoArg) {
Action<int(int n)> a = WithoutArgs(Invoke(Nullary));
EXPECT_EQ(1, a.Perform(std::make_tuple(2)));
}
TEST(WithArgTest, OneArg) {
Action<bool(double x, int n)> b = WithArg<1>(Invoke(Unary));
EXPECT_TRUE(b.Perform(std::make_tuple(1.5, -1)));
EXPECT_FALSE(b.Perform(std::make_tuple(1.5, 1)));
}
TEST(ReturnArgActionTest, WorksForOneArgIntArg0) {
const Action<int(int)> a = ReturnArg<0>();
EXPECT_EQ(5, a.Perform(std::make_tuple(5)));
}
TEST(ReturnArgActionTest, WorksForMultiArgBoolArg0) {
const Action<bool(bool, bool, bool)> a = ReturnArg<0>();
EXPECT_TRUE(a.Perform(std::make_tuple(true, false, false)));
}
TEST(ReturnArgActionTest, WorksForMultiArgStringArg2) {
const Action<std::string(int, int, std::string, int)> a = ReturnArg<2>();
EXPECT_EQ("seven", a.Perform(std::make_tuple(5, 6, std::string("seven"), 8)));
}
TEST(ReturnArgActionTest, WorksForNonConstRefArg0) {
const Action<std::string&(std::string&)> a = ReturnArg<0>();
std::string s = "12345";
EXPECT_EQ(&s, &a.Perform(std::forward_as_tuple(s)));
}
TEST(SaveArgActionTest, WorksForSameType) {
int result = 0;
const Action<void(int n)> a1 = SaveArg<0>(&result);
a1.Perform(std::make_tuple(5));
EXPECT_EQ(5, result);
}
TEST(SaveArgActionTest, WorksForCompatibleType) {
int result = 0;
const Action<void(bool, char)> a1 = SaveArg<1>(&result);
a1.Perform(std::make_tuple(true, 'a'));
EXPECT_EQ('a', result);
}
TEST(SaveArgPointeeActionTest, WorksForSameType) {
int result = 0;
const int value = 5;
const Action<void(const int*)> a1 = SaveArgPointee<0>(&result);
a1.Perform(std::make_tuple(&value));
EXPECT_EQ(5, result);
}
TEST(SaveArgPointeeActionTest, WorksForCompatibleType) {
int result = 0;
char value = 'a';
const Action<void(bool, char*)> a1 = SaveArgPointee<1>(&result);
a1.Perform(std::make_tuple(true, &value));
EXPECT_EQ('a', result);
}
TEST(SetArgRefereeActionTest, WorksForSameType) {
int value = 0;
const Action<void(int&)> a1 = SetArgReferee<0>(1);
a1.Perform(std::tuple<int&>(value));
EXPECT_EQ(1, value);
}
TEST(SetArgRefereeActionTest, WorksForCompatibleType) {
int value = 0;
const Action<void(int, int&)> a1 = SetArgReferee<1>('a');
a1.Perform(std::tuple<int, int&>(0, value));
EXPECT_EQ('a', value);
}
TEST(SetArgRefereeActionTest, WorksWithExtraArguments) {
int value = 0;
const Action<void(bool, int, int&, const char*)> a1 = SetArgReferee<2>('a');
a1.Perform(std::tuple<bool, int, int&, const char*>(true, 0, value, "hi"));
EXPECT_EQ('a', value);
}
class DeletionTester {
public:
explicit DeletionTester(bool* is_deleted) : is_deleted_(is_deleted) {
*is_deleted_ = false;
}
~DeletionTester() { *is_deleted_ = true; }
private:
bool* is_deleted_;
};
TEST(DeleteArgActionTest, OneArg) {
bool is_deleted = false;
DeletionTester* t = new DeletionTester(&is_deleted);
const Action<void(DeletionTester*)> a1 = DeleteArg<0>();
EXPECT_FALSE(is_deleted);
a1.Perform(std::make_tuple(t));
EXPECT_TRUE(is_deleted);
}
TEST(DeleteArgActionTest, TenArgs) {
bool is_deleted = false;
DeletionTester* t = new DeletionTester(&is_deleted);
const Action<void(bool, int, int, const char*, bool, int, int, int, int,
DeletionTester*)>
a1 = DeleteArg<9>();
EXPECT_FALSE(is_deleted);
a1.Perform(std::make_tuple(true, 5, 6, CharPtr("hi"), false, 7, 8, 9, 10, t));
EXPECT_TRUE(is_deleted);
}
#if GTEST_HAS_EXCEPTIONS
TEST(ThrowActionTest, ThrowsGivenExceptionInVoidFunction) {
const Action<void(int n)> a = Throw('a');
EXPECT_THROW(a.Perform(std::make_tuple(0)), char);
}
class MyException {};
TEST(ThrowActionTest, ThrowsGivenExceptionInNonVoidFunction) {
const Action<double(char ch)> a = Throw(MyException());
EXPECT_THROW(a.Perform(std::make_tuple('0')), MyException);
}
TEST(ThrowActionTest, ThrowsGivenExceptionInNullaryFunction) {
const Action<double()> a = Throw(MyException());
EXPECT_THROW(a.Perform(std::make_tuple()), MyException);
}
class Object {
public:
virtual ~Object() {}
virtual void Func() {}
};
class MockObject : public Object {
public:
~MockObject() override {}
MOCK_METHOD(void, Func, (), (override));
};
TEST(ThrowActionTest, Times0) {
EXPECT_NONFATAL_FAILURE(
[] {
try {
MockObject m;
ON_CALL(m, Func()).WillByDefault([] { throw "something"; });
EXPECT_CALL(m, Func()).Times(0);
m.Func();
} catch (...) {
}
}(),
"");
}
#endif
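// SetArrayArgument<k>(first, last) copies the range [first, last) into the
// array or output iterator passed as the k-th (0-based) argument of the mock
// function. The tests below cover plain arrays, empty ranges, convertible
// element types, and output-iterator arguments.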
TEST(SetArrayArgumentTest, SetsTheNthArray) {
using MyFunction = void(bool, int*, char*);
int numbers[] = {1, 2, 3};
Action<MyFunction> a = SetArrayArgument<1>(numbers, numbers + 3);
int n[4] = {};
int* pn = n;
char ch[4] = {};
char* pch = ch;
a.Perform(std::make_tuple(true, pn, pch));
EXPECT_EQ(1, n[0]);
EXPECT_EQ(2, n[1]);
EXPECT_EQ(3, n[2]);
EXPECT_EQ(0, n[3]);
EXPECT_EQ('\0', ch[0]);
EXPECT_EQ('\0', ch[1]);
EXPECT_EQ('\0', ch[2]);
EXPECT_EQ('\0', ch[3]);
std::string letters = "abc";
a = SetArrayArgument<2>(letters.begin(), letters.end());
std::fill_n(n, 4, 0);
std::fill_n(ch, 4, '\0');
a.Perform(std::make_tuple(true, pn, pch));
EXPECT_EQ(0, n[0]);
EXPECT_EQ(0, n[1]);
EXPECT_EQ(0, n[2]);
EXPECT_EQ(0, n[3]);
EXPECT_EQ('a', ch[0]);
EXPECT_EQ('b', ch[1]);
EXPECT_EQ('c', ch[2]);
EXPECT_EQ('\0', ch[3]);
}
TEST(SetArrayArgumentTest, SetsTheNthArrayWithEmptyRange) {
using MyFunction = void(bool, int*);
int numbers[] = {1, 2, 3};
Action<MyFunction> a = SetArrayArgument<1>(numbers, numbers);
int n[4] = {};
int* pn = n;
a.Perform(std::make_tuple(true, pn));
EXPECT_EQ(0, n[0]);
EXPECT_EQ(0, n[1]);
EXPECT_EQ(0, n[2]);
EXPECT_EQ(0, n[3]);
}
TEST(SetArrayArgumentTest, SetsTheNthArrayWithConvertibleType) {
using MyFunction = void(bool, int*);
char chars[] = {97, 98, 99};
Action<MyFunction> a = SetArrayArgument<1>(chars, chars + 3);
int codes[4] = {111, 222, 333, 444};
int* pcodes = codes;
a.Perform(std::make_tuple(true, pcodes));
EXPECT_EQ(97, codes[0]);
EXPECT_EQ(98, codes[1]);
EXPECT_EQ(99, codes[2]);
EXPECT_EQ(444, codes[3]);
}
TEST(SetArrayArgumentTest, SetsTheNthArrayWithIteratorArgument) {
using MyFunction = void(bool, std::back_insert_iterator<std::string>);
std::string letters = "abc";
Action<MyFunction> a = SetArrayArgument<1>(letters.begin(), letters.end());
std::string s;
a.Perform(std::make_tuple(true, std::back_inserter(s)));
EXPECT_EQ(letters, s);
}
TEST(ReturnPointeeTest, Works) {
int n = 42;
const Action<int()> a = ReturnPointee(&n);
EXPECT_EQ(42, a.Perform(std::make_tuple()));
n = 43;
EXPECT_EQ(43, a.Perform(std::make_tuple()));
}
TEST(InvokeArgumentTest, Function0) {
Action<int(int, int (*)())> a = InvokeArgument<1>();
EXPECT_EQ(1, a.Perform(std::make_tuple(2, &Nullary)));
}
TEST(InvokeArgumentTest, Functor1) {
Action<int(UnaryFunctor)> a = InvokeArgument<0>(true);
EXPECT_EQ(1, a.Perform(std::make_tuple(UnaryFunctor())));
}
TEST(InvokeArgumentTest, Functor1MoveOnly) {
Action<int(UnaryMoveOnlyFunctor)> a = InvokeArgument<0>(true);
EXPECT_EQ(1, a.Perform(std::make_tuple(UnaryMoveOnlyFunctor())));
}
TEST(InvokeArgumentTest, OneShotFunctor1) {
Action<int(OneShotUnaryFunctor)> a = InvokeArgument<0>(true);
EXPECT_EQ(1, a.Perform(std::make_tuple(OneShotUnaryFunctor())));
}
TEST(InvokeArgumentTest, Function5) {
Action<int(int (*)(int, int, int, int, int))> a =
InvokeArgument<0>(10000, 2000, 300, 40, 5);
EXPECT_EQ(12345, a.Perform(std::make_tuple(&SumOf5)));
}
TEST(InvokeArgumentTest, Functor5) {
Action<int(SumOf5Functor)> a =
InvokeArgument<0>(10000, 2000, 300, 40, 5);
EXPECT_EQ(12345, a.Perform(std::make_tuple(SumOf5Functor())));
}
TEST(InvokeArgumentTest, Function6) {
Action<int(int (*)(int, int, int, int, int, int))> a =
InvokeArgument<0>(100000, 20000, 3000, 400, 50, 6);
EXPECT_EQ(123456, a.Perform(std::make_tuple(&SumOf6)));
}
TEST(InvokeArgumentTest, Functor6) {
Action<int(SumOf6Functor)> a =
InvokeArgument<0>(100000, 20000, 3000, 400, 50, 6);
EXPECT_EQ(123456, a.Perform(std::make_tuple(SumOf6Functor())));
}
TEST(InvokeArgumentTest, Function7) {
Action<std::string(std::string(*)(const char*, const char*, const char*,
const char*, const char*, const char*,
const char*))>
a = InvokeArgument<0>("1", "2", "3", "4", "5", "6", "7");
EXPECT_EQ("1234567", a.Perform(std::make_tuple(&Concat7)));
}
TEST(InvokeArgumentTest, Function8) {
Action<std::string(std::string(*)(const char*, const char*, const char*,
const char*, const char*, const char*,
const char*, const char*))>
a = InvokeArgument<0>("1", "2", "3", "4", "5", "6", "7", "8");
EXPECT_EQ("12345678", a.Perform(std::make_tuple(&Concat8)));
}
TEST(InvokeArgumentTest, Function9) {
Action<std::string(std::string(*)(const char*, const char*, const char*,
const char*, const char*, const char*,
const char*, const char*, const char*))>
a = InvokeArgument<0>("1", "2", "3", "4", "5", "6", "7", "8", "9");
EXPECT_EQ("123456789", a.Perform(std::make_tuple(&Concat9)));
}
TEST(InvokeArgumentTest, Function10) {
Action<std::string(std::string(*)(
const char*, const char*, const char*, const char*, const char*,
const char*, const char*, const char*, const char*, const char*))>
a = InvokeArgument<0>("1", "2", "3", "4", "5", "6", "7", "8", "9", "0");
EXPECT_EQ("1234567890", a.Perform(std::make_tuple(&Concat10)));
}
TEST(InvokeArgumentTest, ByPointerFunction) {
Action<const char*(const char* (*)(const char* input, short n))>
a = InvokeArgument<0>(static_cast<const char*>("Hi"), Short(1));
EXPECT_STREQ("i", a.Perform(std::make_tuple(&Binary)));
}
TEST(InvokeArgumentTest, FunctionWithCStringLiteral) {
Action<const char*(const char* (*)(const char* input, short n))>
a = InvokeArgument<0>("Hi", Short(1));
EXPECT_STREQ("i", a.Perform(std::make_tuple(&Binary)));
}
TEST(InvokeArgumentTest, ByConstReferenceFunction) {
Action<bool(bool (*function)(const std::string& s))> a =
InvokeArgument<0>(std::string("Hi"));
EXPECT_TRUE(a.Perform(std::make_tuple(&ByConstRef)));
}
TEST(InvokeArgumentTest, ByExplicitConstReferenceFunction) {
Action<bool(bool (*)(const double& x))> a =
InvokeArgument<0>(ByRef(g_double));
EXPECT_TRUE(a.Perform(std::make_tuple(&ReferencesGlobalDouble)));
double x = 0;
a = InvokeArgument<0>(ByRef(x));
EXPECT_FALSE(a.Perform(std::make_tuple(&ReferencesGlobalDouble)));
}
TEST(InvokeArgumentTest, MoveOnlyType) {
struct Marker {};
struct {
MOCK_METHOD(bool, MockMethod,
(std::unique_ptr<Marker>, std::function<int()>), ());
} mock;
ON_CALL(mock, MockMethod(_, _)).WillByDefault(InvokeArgument<1>());
ON_CALL(mock, MockMethod(_, _))
.WillByDefault(WithArg<1>(InvokeArgument<0>()));
}
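// DoAll(a1, a2, ..., an) performs every action in order and returns the
// result of the last one, so all actions but the last are run purely for
// their side effects.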
TEST(DoAllTest, TwoActions) {
int n = 0;
Action<int(int*)> a = DoAll(SetArgPointee<0>(1),
Return(2));
EXPECT_EQ(2, a.Perform(std::make_tuple(&n)));
EXPECT_EQ(1, n);
}
TEST(DoAllTest, ThreeActions) {
int m = 0, n = 0;
Action<int(int*, int*)> a = DoAll(SetArgPointee<0>(1),
SetArgPointee<1>(2), Return(3));
EXPECT_EQ(3, a.Perform(std::make_tuple(&m, &n)));
EXPECT_EQ(1, m);
EXPECT_EQ(2, n);
}
TEST(DoAllTest, FourActions) {
int m = 0, n = 0;
char ch = '\0';
Action<int(int*, int*, char*)> a =
DoAll(SetArgPointee<0>(1), SetArgPointee<1>(2), SetArgPointee<2>('a'),
Return(3));
EXPECT_EQ(3, a.Perform(std::make_tuple(&m, &n, &ch)));
EXPECT_EQ(1, m);
EXPECT_EQ(2, n);
EXPECT_EQ('a', ch);
}
TEST(DoAllTest, FiveActions) {
int m = 0, n = 0;
char a = '\0', b = '\0';
Action<int(int*, int*, char*, char*)> action =
DoAll(SetArgPointee<0>(1), SetArgPointee<1>(2), SetArgPointee<2>('a'),
SetArgPointee<3>('b'), Return(3));
EXPECT_EQ(3, action.Perform(std::make_tuple(&m, &n, &a, &b)));
EXPECT_EQ(1, m);
EXPECT_EQ(2, n);
EXPECT_EQ('a', a);
EXPECT_EQ('b', b);
}
TEST(DoAllTest, SixActions) {
int m = 0, n = 0;
char a = '\0', b = '\0', c = '\0';
Action<int(int*, int*, char*, char*, char*)> action =
DoAll(SetArgPointee<0>(1), SetArgPointee<1>(2), SetArgPointee<2>('a'),
SetArgPointee<3>('b'), SetArgPointee<4>('c'), Return(3));
EXPECT_EQ(3, action.Perform(std::make_tuple(&m, &n, &a, &b, &c)));
EXPECT_EQ(1, m);
EXPECT_EQ(2, n);
EXPECT_EQ('a', a);
EXPECT_EQ('b', b);
EXPECT_EQ('c', c);
}
TEST(DoAllTest, SevenActions) {
int m = 0, n = 0;
char a = '\0', b = '\0', c = '\0', d = '\0';
Action<int(int*, int*, char*, char*, char*, char*)> action =
DoAll(SetArgPointee<0>(1), SetArgPointee<1>(2), SetArgPointee<2>('a'),
SetArgPointee<3>('b'), SetArgPointee<4>('c'), SetArgPointee<5>('d'),
Return(3));
EXPECT_EQ(3, action.Perform(std::make_tuple(&m, &n, &a, &b, &c, &d)));
EXPECT_EQ(1, m);
EXPECT_EQ(2, n);
EXPECT_EQ('a', a);
EXPECT_EQ('b', b);
EXPECT_EQ('c', c);
EXPECT_EQ('d', d);
}
TEST(DoAllTest, EightActions) {
int m = 0, n = 0;
char a = '\0', b = '\0', c = '\0', d = '\0', e = '\0';
Action<int(int*, int*, char*, char*, char*, char*,
char*)>
action =
DoAll(SetArgPointee<0>(1), SetArgPointee<1>(2), SetArgPointee<2>('a'),
SetArgPointee<3>('b'), SetArgPointee<4>('c'),
SetArgPointee<5>('d'), SetArgPointee<6>('e'), Return(3));
EXPECT_EQ(3, action.Perform(std::make_tuple(&m, &n, &a, &b, &c, &d, &e)));
EXPECT_EQ(1, m);
EXPECT_EQ(2, n);
EXPECT_EQ('a', a);
EXPECT_EQ('b', b);
EXPECT_EQ('c', c);
EXPECT_EQ('d', d);
EXPECT_EQ('e', e);
}
TEST(DoAllTest, NineActions) {
int m = 0, n = 0;
char a = '\0', b = '\0', c = '\0', d = '\0', e = '\0', f = '\0';
Action<int(int*, int*, char*, char*, char*, char*,
char*, char*)>
action = DoAll(SetArgPointee<0>(1), SetArgPointee<1>(2),
SetArgPointee<2>('a'), SetArgPointee<3>('b'),
SetArgPointee<4>('c'), SetArgPointee<5>('d'),
SetArgPointee<6>('e'), SetArgPointee<7>('f'), Return(3));
EXPECT_EQ(3, action.Perform(std::make_tuple(&m, &n, &a, &b, &c, &d, &e, &f)));
EXPECT_EQ(1, m);
EXPECT_EQ(2, n);
EXPECT_EQ('a', a);
EXPECT_EQ('b', b);
EXPECT_EQ('c', c);
EXPECT_EQ('d', d);
EXPECT_EQ('e', e);
EXPECT_EQ('f', f);
}
TEST(DoAllTest, TenActions) {
int m = 0, n = 0;
char a = '\0', b = '\0', c = '\0', d = '\0';
char e = '\0', f = '\0', g = '\0';
Action<int(int*, int*, char*, char*, char*, char*,
char*, char*, char*)>
action =
DoAll(SetArgPointee<0>(1), SetArgPointee<1>(2), SetArgPointee<2>('a'),
SetArgPointee<3>('b'), SetArgPointee<4>('c'),
SetArgPointee<5>('d'), SetArgPointee<6>('e'),
SetArgPointee<7>('f'), SetArgPointee<8>('g'), Return(3));
EXPECT_EQ(
3, action.Perform(std::make_tuple(&m, &n, &a, &b, &c, &d, &e, &f, &g)));
EXPECT_EQ(1, m);
EXPECT_EQ(2, n);
EXPECT_EQ('a', a);
EXPECT_EQ('b', b);
EXPECT_EQ('c', c);
EXPECT_EQ('d', d);
EXPECT_EQ('e', e);
EXPECT_EQ('f', f);
EXPECT_EQ('g', g);
}
TEST(DoAllTest, NoArgs) {
bool ran_first = false;
Action<bool()> a =
DoAll([&] { ran_first = true; }, [&] { return ran_first; });
EXPECT_TRUE(a.Perform({}));
}
TEST(DoAllTest, MoveOnlyArgs) {
bool ran_first = false;
Action<int(std::unique_ptr<int>)> a =
DoAll(InvokeWithoutArgs([&] { ran_first = true; }),
[](std::unique_ptr<int> p) { return *p; });
EXPECT_EQ(7, a.Perform(std::make_tuple(std::unique_ptr<int>(new int(7)))));
EXPECT_TRUE(ran_first);
}
TEST(DoAllTest, ImplicitlyConvertsActionArguments) {
bool ran_first = false;
Action<void(std::vector<int>)> first = [&] { ran_first = true; };
Action<int(std::vector<int>)> a =
DoAll(first, [](std::vector<int> arg) { return arg.front(); });
EXPECT_EQ(7, a.Perform(std::make_tuple(std::vector<int>{7})));
EXPECT_TRUE(ran_first);
}
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4100 4503)
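// The ACTION* macros below define custom actions. Inside an ACTION body the
// mock function's arguments are available as arg0, arg1, ... (with types
// arg0_type, arg1_type, ...), the whole argument tuple as args/args_type,
// and the mocked signature as function_type/return_type.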
ACTION(Return5) { return 5; }
TEST(ActionMacroTest, WorksWhenNotReferencingArguments) {
Action<double()> a1 = Return5();
EXPECT_DOUBLE_EQ(5, a1.Perform(std::make_tuple()));
Action<int(double, bool)> a2 = Return5();
EXPECT_EQ(5, a2.Perform(std::make_tuple(1, true)));
}
ACTION(IncrementArg1) { (*arg1)++; }
TEST(ActionMacroTest, WorksWhenReturningVoid) {
Action<void(int, int*)> a1 = IncrementArg1();
int n = 0;
a1.Perform(std::make_tuple(5, &n));
EXPECT_EQ(1, n);
}
ACTION(IncrementArg2) {
StaticAssertTypeEq<int*, arg2_type>();
arg2_type temp = arg2;
(*temp)++;
}
TEST(ActionMacroTest, CanReferenceArgumentType) {
Action<void(int, bool, int*)> a1 = IncrementArg2();
int n = 0;
a1.Perform(std::make_tuple(5, false, &n));
EXPECT_EQ(1, n);
}
ACTION(Sum2) {
StaticAssertTypeEq<std::tuple<int, char, int*>, args_type>();
args_type args_copy = args;
return std::get<0>(args_copy) + std::get<1>(args_copy);
}
TEST(ActionMacroTest, CanReferenceArgumentTuple) {
Action<int(int, char, int*)> a1 = Sum2();
int dummy = 0;
EXPECT_EQ(11, a1.Perform(std::make_tuple(5, Char(6), &dummy)));
}
namespace {
int Dummy(bool flag) { return flag ? 1 : 0; }
}
ACTION(InvokeDummy) {
StaticAssertTypeEq<int(bool), function_type>();
function_type* fp = &Dummy;
return (*fp)(true);
}
TEST(ActionMacroTest, CanReferenceMockFunctionType) {
Action<int(bool)> a1 = InvokeDummy();
EXPECT_EQ(1, a1.Perform(std::make_tuple(true)));
EXPECT_EQ(1, a1.Perform(std::make_tuple(false)));
}
ACTION(InvokeDummy2) {
StaticAssertTypeEq<int, return_type>();
return_type result = Dummy(true);
return result;
}
TEST(ActionMacroTest, CanReferenceMockFunctionReturnType) {
Action<int(bool)> a1 = InvokeDummy2();
EXPECT_EQ(1, a1.Perform(std::make_tuple(true)));
EXPECT_EQ(1, a1.Perform(std::make_tuple(false)));
}
ACTION(ReturnAddrOfConstBoolReferenceArg) {
StaticAssertTypeEq<const bool&, arg1_type>();
return &arg1;
}
TEST(ActionMacroTest, WorksForConstReferenceArg) {
Action<const bool*(int, const bool&)> a = ReturnAddrOfConstBoolReferenceArg();
const bool b = false;
EXPECT_EQ(&b, a.Perform(std::tuple<int, const bool&>(0, b)));
}
ACTION(ReturnAddrOfIntReferenceArg) {
StaticAssertTypeEq<int&, arg0_type>();
return &arg0;
}
TEST(ActionMacroTest, WorksForNonConstReferenceArg) {
Action<int*(int&, bool, int)> a = ReturnAddrOfIntReferenceArg();
int n = 0;
EXPECT_EQ(&n, a.Perform(std::tuple<int&, bool, int>(n, true, 1)));
}
namespace action_test {
ACTION(Sum) { return arg0 + arg1; }
}
TEST(ActionMacroTest, WorksInNamespace) {
Action<int(int, int)> a1 = action_test::Sum();
EXPECT_EQ(3, a1.Perform(std::make_tuple(1, 2)));
}
ACTION(PlusTwo) { return arg0 + 2; }
TEST(ActionMacroTest, WorksForDifferentArgumentNumbers) {
Action<int(int)> a1 = PlusTwo();
EXPECT_EQ(4, a1.Perform(std::make_tuple(2)));
Action<double(float, void*)> a2 = PlusTwo();
int dummy;
EXPECT_DOUBLE_EQ(6, a2.Perform(std::make_tuple(4.0f, &dummy)));
}
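// ACTION_P through ACTION_P10 define actions parameterized by 1 to 10
// values; each value parameter is also exposed to the body through a
// corresponding <name>_type alias (see n_type in TypedPlus below).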
ACTION_P(Plus, n) { return arg0 + n; }
TEST(ActionPMacroTest, DefinesParameterizedAction) {
Action<int(int m, bool t)> a1 = Plus(9);
EXPECT_EQ(10, a1.Perform(std::make_tuple(1, true)));
}
ACTION_P(TypedPlus, n) {
arg0_type t1 = arg0;
n_type t2 = n;
return t1 + t2;
}
TEST(ActionPMacroTest, CanReferenceArgumentAndParameterTypes) {
Action<int(char m, bool t)> a1 = TypedPlus(9);
EXPECT_EQ(10, a1.Perform(std::make_tuple(Char(1), true)));
}
TEST(ActionPMacroTest, WorksInCompatibleMockFunction) {
Action<std::string(const std::string& s)> a1 = Plus("tail");
const std::string re = "re";
std::tuple<const std::string> dummy = std::make_tuple(re);
EXPECT_EQ("retail", a1.Perform(dummy));
}
ACTION(OverloadedAction) { return arg0 ? arg1 : "hello"; }
ACTION_P(OverloadedAction, default_value) {
return arg0 ? arg1 : default_value;
}
ACTION_P2(OverloadedAction, true_value, false_value) {
return arg0 ? true_value : false_value;
}
TEST(ActionMacroTest, CanDefineOverloadedActions) {
using MyAction = Action<const char*(bool, const char*)>;
const MyAction a1 = OverloadedAction();
EXPECT_STREQ("hello", a1.Perform(std::make_tuple(false, CharPtr("world"))));
EXPECT_STREQ("world", a1.Perform(std::make_tuple(true, CharPtr("world"))));
const MyAction a2 = OverloadedAction("hi");
EXPECT_STREQ("hi", a2.Perform(std::make_tuple(false, CharPtr("world"))));
EXPECT_STREQ("world", a2.Perform(std::make_tuple(true, CharPtr("world"))));
const MyAction a3 = OverloadedAction("hi", "you");
EXPECT_STREQ("hi", a3.Perform(std::make_tuple(true, CharPtr("world"))));
EXPECT_STREQ("you", a3.Perform(std::make_tuple(false, CharPtr("world"))));
}
ACTION_P3(Plus, m, n, k) { return arg0 + m + n + k; }
TEST(ActionPnMacroTest, WorksFor3Parameters) {
Action<double(int m, bool t)> a1 = Plus(100, 20, 3.4);
EXPECT_DOUBLE_EQ(3123.4, a1.Perform(std::make_tuple(3000, true)));
Action<std::string(const std::string& s)> a2 = Plus("tail", "-", ">");
const std::string re = "re";
std::tuple<const std::string> dummy = std::make_tuple(re);
EXPECT_EQ("retail->", a2.Perform(dummy));
}
ACTION_P4(Plus, p0, p1, p2, p3) { return arg0 + p0 + p1 + p2 + p3; }
TEST(ActionPnMacroTest, WorksFor4Parameters) {
Action<int(int)> a1 = Plus(1, 2, 3, 4);
EXPECT_EQ(10 + 1 + 2 + 3 + 4, a1.Perform(std::make_tuple(10)));
}
ACTION_P5(Plus, p0, p1, p2, p3, p4) { return arg0 + p0 + p1 + p2 + p3 + p4; }
TEST(ActionPnMacroTest, WorksFor5Parameters) {
Action<int(int)> a1 = Plus(1, 2, 3, 4, 5);
EXPECT_EQ(10 + 1 + 2 + 3 + 4 + 5, a1.Perform(std::make_tuple(10)));
}
ACTION_P6(Plus, p0, p1, p2, p3, p4, p5) {
return arg0 + p0 + p1 + p2 + p3 + p4 + p5;
}
TEST(ActionPnMacroTest, WorksFor6Parameters) {
Action<int(int)> a1 = Plus(1, 2, 3, 4, 5, 6);
EXPECT_EQ(10 + 1 + 2 + 3 + 4 + 5 + 6, a1.Perform(std::make_tuple(10)));
}
ACTION_P7(Plus, p0, p1, p2, p3, p4, p5, p6) {
return arg0 + p0 + p1 + p2 + p3 + p4 + p5 + p6;
}
TEST(ActionPnMacroTest, WorksFor7Parameters) {
Action<int(int)> a1 = Plus(1, 2, 3, 4, 5, 6, 7);
EXPECT_EQ(10 + 1 + 2 + 3 + 4 + 5 + 6 + 7, a1.Perform(std::make_tuple(10)));
}
ACTION_P8(Plus, p0, p1, p2, p3, p4, p5, p6, p7) {
return arg0 + p0 + p1 + p2 + p3 + p4 + p5 + p6 + p7;
}
TEST(ActionPnMacroTest, WorksFor8Parameters) {
Action<int(int)> a1 = Plus(1, 2, 3, 4, 5, 6, 7, 8);
EXPECT_EQ(10 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
a1.Perform(std::make_tuple(10)));
}
ACTION_P9(Plus, p0, p1, p2, p3, p4, p5, p6, p7, p8) {
return arg0 + p0 + p1 + p2 + p3 + p4 + p5 + p6 + p7 + p8;
}
TEST(ActionPnMacroTest, WorksFor9Parameters) {
Action<int(int)> a1 = Plus(1, 2, 3, 4, 5, 6, 7, 8, 9);
EXPECT_EQ(10 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9,
a1.Perform(std::make_tuple(10)));
}
ACTION_P10(Plus, p0, p1, p2, p3, p4, p5, p6, p7, p8, last_param) {
arg0_type t0 = arg0;
last_param_type t9 = last_param;
return t0 + p0 + p1 + p2 + p3 + p4 + p5 + p6 + p7 + p8 + t9;
}
TEST(ActionPnMacroTest, WorksFor10Parameters) {
Action<int(int)> a1 = Plus(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
EXPECT_EQ(10 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10,
a1.Perform(std::make_tuple(10)));
}
ACTION_P2(PadArgument, prefix, suffix) {
std::string prefix_str(prefix);
char suffix_char = static_cast<char>(suffix);
return prefix_str + arg0 + suffix_char;
}
TEST(ActionPnMacroTest, SimpleTypePromotion) {
Action<std::string(const char*)> no_promo =
PadArgument(std::string("foo"), 'r');
Action<std::string(const char*)> promo =
PadArgument("foo", static_cast<int>('r'));
EXPECT_EQ("foobar", no_promo.Perform(std::make_tuple(CharPtr("ba"))));
EXPECT_EQ("foobar", promo.Perform(std::make_tuple(CharPtr("ba"))));
}
ACTION_P3(ConcatImpl, a, b, c) {
std::stringstream ss;
ss << a << b << c;
return ss.str();
}
template <typename T1, typename T2>
ConcatImplActionP3<std::string, T1, T2> Concat(const std::string& a, T1 b,
T2 c) {
GTEST_INTENTIONAL_CONST_COND_PUSH_()
if (true) {
GTEST_INTENTIONAL_CONST_COND_POP_()
return ConcatImpl(a, b, c);
} else {
return ConcatImpl<std::string, T1, T2>(a, b, c);
}
}
template <typename T1, typename T2>
ConcatImplActionP3<T1, int, T2> Concat(T1 a, int b, T2 c) {
return ConcatImpl(a, b, c);
}
TEST(ActionPnMacroTest, CanPartiallyRestrictParameterTypes) {
Action<const std::string()> a1 = Concat("Hello", "1", 2);
EXPECT_EQ("Hello12", a1.Perform(std::make_tuple()));
a1 = Concat(1, 2, 3);
EXPECT_EQ("123", a1.Perform(std::make_tuple()));
}
ACTION(DoFoo) {}
ACTION_P(DoFoo, p) {}
ACTION_P2(DoFoo, p0, p1) {}
TEST(ActionPnMacroTest, TypesAreCorrect) {
DoFooAction a0 = DoFoo();
DoFooActionP<int> a1 = DoFoo(1);
DoFooActionP2<int, char> a2 = DoFoo(1, '2');
PlusActionP3<int, int, char> a3 = Plus(1, 2, '3');
PlusActionP4<int, int, int, char> a4 = Plus(1, 2, 3, '4');
PlusActionP5<int, int, int, int, char> a5 = Plus(1, 2, 3, 4, '5');
PlusActionP6<int, int, int, int, int, char> a6 = Plus(1, 2, 3, 4, 5, '6');
PlusActionP7<int, int, int, int, int, int, char> a7 =
Plus(1, 2, 3, 4, 5, 6, '7');
PlusActionP8<int, int, int, int, int, int, int, char> a8 =
Plus(1, 2, 3, 4, 5, 6, 7, '8');
PlusActionP9<int, int, int, int, int, int, int, int, char> a9 =
Plus(1, 2, 3, 4, 5, 6, 7, 8, '9');
PlusActionP10<int, int, int, int, int, int, int, int, int, char> a10 =
Plus(1, 2, 3, 4, 5, 6, 7, 8, 9, '0');
(void)a0;
(void)a1;
(void)a2;
(void)a3;
(void)a4;
(void)a5;
(void)a6;
(void)a7;
(void)a8;
(void)a9;
(void)a10;
}
ACTION_P(Plus1, x) { return x; }
ACTION_P2(Plus2, x, y) { return x + y; }
ACTION_P3(Plus3, x, y, z) { return x + y + z; }
ACTION_P10(Plus10, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9) {
return a0 + a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8 + a9;
}
TEST(ActionPnMacroTest, CanExplicitlyInstantiateWithReferenceTypes) {
int x = 1, y = 2, z = 3;
const std::tuple<> empty = std::make_tuple();
Action<int()> a = Plus1<int&>(x);
EXPECT_EQ(1, a.Perform(empty));
a = Plus2<const int&, int&>(x, y);
EXPECT_EQ(3, a.Perform(empty));
a = Plus3<int&, const int&, int&>(x, y, z);
EXPECT_EQ(6, a.Perform(empty));
int n[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
a = Plus10<const int&, int&, const int&, int&, const int&, int&, const int&,
int&, const int&, int&>(n[0], n[1], n[2], n[3], n[4], n[5], n[6],
n[7], n[8], n[9]);
EXPECT_EQ(55, a.Perform(empty));
}
class TenArgConstructorClass {
public:
TenArgConstructorClass(int a1, int a2, int a3, int a4, int a5, int a6, int a7,
int a8, int a9, int a10)
: value_(a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8 + a9 + a10) {}
int value_;
};
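// ACTION_TEMPLATE defines an action template with explicit template
// parameters (HAS_m_TEMPLATE_PARAMS) and value parameters
// (AND_n_VALUE_PARAMS); value parameters likewise expose a <name>_type
// alias, e.g. pointee_type in ReturnSmartPointer further below.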
ACTION_TEMPLATE(CreateNew, HAS_1_TEMPLATE_PARAMS(typename, T),
AND_0_VALUE_PARAMS()) {
return new T;
}
TEST(ActionTemplateTest, WorksWithoutValueParam) {
const Action<int*()> a = CreateNew<int>();
int* p = a.Perform(std::make_tuple());
delete p;
}
ACTION_TEMPLATE(CreateNew, HAS_1_TEMPLATE_PARAMS(typename, T),
AND_1_VALUE_PARAMS(a0)) {
return new T(a0);
}
TEST(ActionTemplateTest, WorksWithValueParams) {
const Action<int*()> a = CreateNew<int>(42);
int* p = a.Perform(std::make_tuple());
EXPECT_EQ(42, *p);
delete p;
}
ACTION_TEMPLATE(MyDeleteArg, HAS_1_TEMPLATE_PARAMS(int, k),
AND_0_VALUE_PARAMS()) {
delete std::get<k>(args);
}
class BoolResetter {
public:
explicit BoolResetter(bool* value) : value_(value) {}
~BoolResetter() { *value_ = false; }
private:
bool* value_;
};
TEST(ActionTemplateTest, WorksForIntegralTemplateParams) {
const Action<void(int*, BoolResetter*)> a = MyDeleteArg<1>();
int n = 0;
bool b = true;
auto* resetter = new BoolResetter(&b);
a.Perform(std::make_tuple(&n, resetter));
EXPECT_FALSE(b);
}
ACTION_TEMPLATE(ReturnSmartPointer,
HAS_1_TEMPLATE_PARAMS(template <typename Pointee> class,
Pointer),
AND_1_VALUE_PARAMS(pointee)) {
return Pointer<pointee_type>(new pointee_type(pointee));
}
TEST(ActionTemplateTest, WorksForTemplateTemplateParameters) {
const Action<std::shared_ptr<int>()> a =
ReturnSmartPointer<std::shared_ptr>(42);
std::shared_ptr<int> p = a.Perform(std::make_tuple());
EXPECT_EQ(42, *p);
}
template <typename T1, typename T2, typename T3, int k4, bool k5,
unsigned int k6, typename T7, typename T8, typename T9>
struct GiantTemplate {
public:
explicit GiantTemplate(int a_value) : value(a_value) {}
int value;
};
ACTION_TEMPLATE(ReturnGiant,
HAS_10_TEMPLATE_PARAMS(typename, T1, typename, T2, typename, T3,
int, k4, bool, k5, unsigned int, k6,
class, T7, class, T8, class, T9,
template <typename T> class, T10),
AND_1_VALUE_PARAMS(value)) {
return GiantTemplate<T10<T1>, T2, T3, k4, k5, k6, T7, T8, T9>(value);
}
TEST(ActionTemplateTest, WorksFor10TemplateParameters) {
using Giant = GiantTemplate<std::shared_ptr<int>, bool, double, 5, true, 6,
char, unsigned, int>;
const Action<Giant()> a = ReturnGiant<int, bool, double, 5, true, 6, char,
unsigned, int, std::shared_ptr>(42);
Giant giant = a.Perform(std::make_tuple());
EXPECT_EQ(42, giant.value);
}
ACTION_TEMPLATE(ReturnSum, HAS_1_TEMPLATE_PARAMS(typename, Number),
AND_10_VALUE_PARAMS(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10)) {
return static_cast<Number>(v1) + v2 + v3 + v4 + v5 + v6 + v7 + v8 + v9 + v10;
}
TEST(ActionTemplateTest, WorksFor10ValueParameters) {
const Action<int()> a = ReturnSum<int>(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
EXPECT_EQ(55, a.Perform(std::make_tuple()));
}
ACTION(ReturnSum) { return 0; }
ACTION_P(ReturnSum, x) { return x; }
ACTION_TEMPLATE(ReturnSum, HAS_1_TEMPLATE_PARAMS(typename, Number),
AND_2_VALUE_PARAMS(v1, v2)) {
return static_cast<Number>(v1) + v2;
}
ACTION_TEMPLATE(ReturnSum, HAS_1_TEMPLATE_PARAMS(typename, Number),
AND_3_VALUE_PARAMS(v1, v2, v3)) {
return static_cast<Number>(v1) + v2 + v3;
}
ACTION_TEMPLATE(ReturnSum, HAS_2_TEMPLATE_PARAMS(typename, Number, int, k),
AND_4_VALUE_PARAMS(v1, v2, v3, v4)) {
return static_cast<Number>(v1) + v2 + v3 + v4 + k;
}
TEST(ActionTemplateTest, CanBeOverloadedOnNumberOfValueParameters) {
const Action<int()> a0 = ReturnSum();
const Action<int()> a1 = ReturnSum(1);
const Action<int()> a2 = ReturnSum<int>(1, 2);
const Action<int()> a3 = ReturnSum<int>(1, 2, 3);
const Action<int()> a4 = ReturnSum<int, 10000>(2000, 300, 40, 5);
EXPECT_EQ(0, a0.Perform(std::make_tuple()));
EXPECT_EQ(1, a1.Perform(std::make_tuple()));
EXPECT_EQ(3, a2.Perform(std::make_tuple()));
EXPECT_EQ(6, a3.Perform(std::make_tuple()));
EXPECT_EQ(12345, a4.Perform(std::make_tuple()));
}
}
}
GTEST_DISABLE_MSC_WARNINGS_POP_()
GTEST_DISABLE_MSC_WARNINGS_POP_() | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googlemock/include/gmock/gmock-more-actions.h | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googlemock/test/gmock-more-actions_test.cc | a1e255a582377e1006bb88a408ac3f933ba7c916 |
61925c0b-7afa-459a-b403-0769cea80bda | cpp | tensorflow/tensorflow | snapshot_utils | tensorflow/core/data/snapshot_utils.cc | tensorflow/core/data/snapshot_utils_test.cc | #include "tensorflow/core/data/snapshot_utils.h"
#include <algorithm>
#include <climits>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "xla/tsl/lib/io/snappy/snappy_inputbuffer.h"
#include "xla/tsl/lib/io/snappy/snappy_outputbuffer.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/io/buffered_inputstream.h"
#include "tensorflow/core/lib/io/random_inputstream.h"
#include "tensorflow/core/lib/io/record_writer.h"
#include "tensorflow/core/lib/io/zlib_compression_options.h"
#include "tensorflow/core/lib/io/zlib_inputstream.h"
#include "tensorflow/core/lib/io/zlib_outputbuffer.h"
#include "tensorflow/core/platform/coding.h"
#include "tensorflow/core/platform/file_system.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/protobuf/snapshot.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
namespace snapshot_util {
namespace {
constexpr const char* const kOutputTypes = "output_types";
constexpr const char* const kOutputShapes = "output_shapes";
constexpr const char* const kCompression = "compression";
constexpr const char* const kVersion = "version";
constexpr const char* const kCurrentCheckpointID = "current_checkpoint_id";
constexpr const char* const kIndex = "index";
constexpr const char* const kStartIndex = "start_index";
std::string ProtoSerializationErrorMessage(const TensorProto& proto,
const std::string& output_file) {
const auto proto_byte_size = proto.ByteSizeLong();
std::string error_message =
absl::StrCat("Failed to serialize tensor proto of ", proto_byte_size,
" bytes to file: ", output_file);
if (proto_byte_size > INT_MAX) {
absl::StrAppend(&error_message, ": exceeded maximum protobuf size of 2GB.");
}
return error_message;
}
}
constexpr const int64_t
CustomReader::kSnappyReaderInputBufferSizeBytes;
constexpr const int64_t
CustomReader::kSnappyReaderOutputBufferSizeBytes;
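// The path helpers below compose the on-disk snapshot layout:
// <path>/<hash>/<run_id>/<shard_id><kShardDirectorySuffix>/<checkpoint_id>.snapshot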
std::string HashDirectory(const std::string& path, uint64 hash) {
return io::JoinPath(
path, strings::Printf("%llu", static_cast<unsigned long long>(hash)));
}
std::string RunDirectory(const std::string& hash_directory, uint64 run_id) {
return RunDirectory(
hash_directory,
strings::Printf("%llu", static_cast<unsigned long long>(run_id)));
}
std::string RunDirectory(const std::string& hash_directory,
const std::string& run_id) {
return io::JoinPath(hash_directory, run_id);
}
std::string ShardDirectory(const std::string& run_directory, int64_t shard_id) {
return io::JoinPath(
run_directory,
strings::Printf("%08llu%s", static_cast<unsigned long long>(shard_id),
kShardDirectorySuffix));
}
std::string GetCheckpointFileName(const std::string& shard_directory,
uint64 checkpoint_id) {
return io::JoinPath(
shard_directory,
strings::Printf("%08llu.snapshot",
static_cast<unsigned long long>(checkpoint_id)));
}
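// Writer::Create is a factory keyed on the snapshot format version:
// version 1 produces the custom record format, version 2 produces standard
// TFRecords. A minimal usage sketch (hypothetical caller; the signature is
// the one declared here):
//
//   std::unique_ptr<Writer> writer;
//   TF_RETURN_IF_ERROR(Writer::Create(env, filename, io::compression::kSnappy,
//                                     /*version=*/2, dtypes, &writer));
//   TF_RETURN_IF_ERROR(writer->WriteTensors(tensors));
//   TF_RETURN_IF_ERROR(writer->Close());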
Status Writer::Create(Env* env, const std::string& filename,
const std::string& compression_type, int version,
const DataTypeVector& dtypes,
std::unique_ptr<Writer>* out_writer) {
switch (version) {
case 1:
*out_writer =
std::make_unique<CustomWriter>(filename, compression_type, dtypes);
break;
case 2:
*out_writer =
std::make_unique<TFRecordWriter>(filename, compression_type);
break;
default:
return errors::InvalidArgument("Snapshot writer version: ", version,
" is not supported.");
}
return (*out_writer)->Initialize(env);
}
TFRecordWriter::TFRecordWriter(const std::string& filename,
const std::string& compression_type)
: filename_(filename), compression_type_(compression_type) {}
Status TFRecordWriter::Initialize(tensorflow::Env* env) {
TF_RETURN_IF_ERROR(env->NewAppendableFile(filename_, &dest_));
record_writer_ = std::make_unique<io::RecordWriter>(
dest_.get(), io::RecordWriterOptions::CreateRecordWriterOptions(
compression_type_));
return absl::OkStatus();
}
Status TFRecordWriter::WriteTensors(const std::vector<Tensor>& tensors) {
for (const auto& tensor : tensors) {
TensorProto proto;
tensor.AsProtoTensorContent(&proto);
#if defined(TF_CORD_SUPPORT)
auto* proto_buffer = new std::string();
if (!proto.SerializeToString(proto_buffer)) {
delete proto_buffer;
return errors::DataLoss(ProtoSerializationErrorMessage(proto, filename_));
}
absl::Cord proto_serialized = absl::MakeCordFromExternal(
*proto_buffer,
[proto_buffer](absl::string_view) { delete proto_buffer; });
TF_RETURN_IF_ERROR(record_writer_->WriteRecord(proto_serialized));
#else
std::string proto_serialized;
if (!proto.SerializeToString(&proto_serialized)) {
return errors::DataLoss(ProtoSerializationErrorMessage(proto, filename_));
}
TF_RETURN_IF_ERROR(record_writer_->WriteRecord(proto_serialized));
#endif
}
return absl::OkStatus();
}
Status TFRecordWriter::Sync() {
TF_RETURN_IF_ERROR(record_writer_->Flush());
return dest_->Flush();
}
Status TFRecordWriter::Close() {
if (record_writer_ != nullptr) {
TF_RETURN_IF_ERROR(Sync());
TF_RETURN_IF_ERROR(record_writer_->Close());
TF_RETURN_IF_ERROR(dest_->Close());
record_writer_ = nullptr;
dest_ = nullptr;
}
return absl::OkStatus();
}
TFRecordWriter::~TFRecordWriter() {
Status s = Close();
if (!s.ok()) {
LOG(ERROR) << "Failed to close snapshot file " << filename_ << ": " << s;
}
}
CustomWriter::CustomWriter(const std::string& filename,
const std::string& compression_type,
const DataTypeVector& dtypes)
: filename_(filename),
compression_type_(compression_type),
dtypes_(dtypes) {}
Status CustomWriter::Initialize(tensorflow::Env* env) {
TF_RETURN_IF_ERROR(env->NewAppendableFile(filename_, &dest_));
#if defined(IS_SLIM_BUILD)
if (compression_type_ != io::compression::kNone) {
LOG(ERROR) << "Compression is unsupported on mobile platforms. Turning "
<< "off compression.";
}
#else
if (compression_type_ == io::compression::kGzip) {
zlib_underlying_dest_.swap(dest_);
io::ZlibCompressionOptions zlib_options;
zlib_options = io::ZlibCompressionOptions::GZIP();
io::ZlibOutputBuffer* zlib_output_buffer = new io::ZlibOutputBuffer(
zlib_underlying_dest_.get(), zlib_options.input_buffer_size,
zlib_options.output_buffer_size, zlib_options);
TF_CHECK_OK(zlib_output_buffer->Init());
dest_.reset(zlib_output_buffer);
}
#endif
simple_tensor_mask_.reserve(dtypes_.size());
for (const auto& dtype : dtypes_) {
if (DataTypeCanUseMemcpy(dtype)) {
simple_tensor_mask_.push_back(true);
num_simple_++;
} else {
simple_tensor_mask_.push_back(false);
num_complex_++;
}
}
return absl::OkStatus();
}
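// For snappy compression, WriteTensors below avoids a per-tensor proto
// round trip: memcpy-able tensors contribute their raw TensorBuffers, other
// tensors are serialized to TensorProtos, the pieces are concatenated and
// compressed as a single snappy block, and the block is written as a
// metadata record followed by the data record. Other compression types fall
// back to serializing one SnapshotRecord proto per element.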
Status CustomWriter::WriteTensors(const std::vector<Tensor>& tensors) {
if (compression_type_ != io::compression::kSnappy) {
experimental::SnapshotRecord record;
for (const auto& tensor : tensors) {
TensorProto* t = record.add_tensor();
tensor.AsProtoTensorContent(t);
}
#if defined(TF_CORD_SUPPORT)
auto record_buffer = new std::string();
record.SerializeToString(record_buffer);
absl::Cord record_serialized = absl::MakeCordFromExternal(
*record_buffer,
[record_buffer](absl::string_view) { delete record_buffer; });
return WriteRecord(record_serialized);
#else
return WriteRecord(record.SerializeAsString());
#endif
}
std::vector<const TensorBuffer*> tensor_buffers;
tensor_buffers.reserve(num_simple_);
std::vector<TensorProto> tensor_protos;
tensor_protos.reserve(num_complex_);
experimental::SnapshotTensorMetadata metadata;
int64_t total_size = 0;
for (int i = 0, end = tensors.size(); i < end; ++i) {
const Tensor& tensor = tensors[i];
experimental::TensorMetadata* tensor_metadata =
metadata.add_tensor_metadata();
tensor.shape().AsProto(tensor_metadata->mutable_tensor_shape());
int64_t size = 0;
if (simple_tensor_mask_[i]) {
auto tensor_buffer = DMAHelper::buffer(&tensor);
tensor_buffers.push_back(tensor_buffer);
size = tensor_buffer->size();
} else {
TensorProto proto;
tensor.AsProtoTensorContent(&proto);
size = proto.ByteSizeLong();
tensor_protos.push_back(std::move(proto));
}
tensor_metadata->set_tensor_size_bytes(size);
total_size += size;
}
std::vector<char> uncompressed(total_size);
char* position = uncompressed.data();
int buffer_index = 0;
int proto_index = 0;
for (int i = 0, end = tensors.size(); i < end; ++i) {
const auto& tensor_metadata = metadata.tensor_metadata(i);
if (simple_tensor_mask_[i]) {
memcpy(position, tensor_buffers[buffer_index]->data(),
tensor_metadata.tensor_size_bytes());
buffer_index++;
} else {
tensor_protos[proto_index].SerializeToArray(
position, tensor_metadata.tensor_size_bytes());
proto_index++;
}
position += tensor_metadata.tensor_size_bytes();
}
DCHECK_EQ(position, uncompressed.data() + total_size);
string output;
if (!tsl::port::Snappy_Compress(uncompressed.data(), total_size, &output)) {
return errors::Internal("Failed to compress using snappy.");
}
#if defined(TF_CORD_SUPPORT)
auto metadata_buffer = new std::string();
metadata.SerializeToString(metadata_buffer);
absl::Cord metadata_serialized = absl::MakeCordFromExternal(
*metadata_buffer,
[metadata_buffer](absl::string_view) { delete metadata_buffer; });
#else
std::string metadata_serialized = metadata.SerializeAsString();
#endif
TF_RETURN_IF_ERROR(WriteRecord(metadata_serialized));
TF_RETURN_IF_ERROR(WriteRecord(output));
return absl::OkStatus();
}
Status CustomWriter::Sync() { return dest_->Sync(); }
Status CustomWriter::Close() {
if (dest_ != nullptr) {
TF_RETURN_IF_ERROR(dest_->Close());
dest_ = nullptr;
}
if (zlib_underlying_dest_ != nullptr) {
TF_RETURN_IF_ERROR(zlib_underlying_dest_->Close());
zlib_underlying_dest_ = nullptr;
}
return absl::OkStatus();
}
CustomWriter::~CustomWriter() {
Status s = Close();
if (!s.ok()) {
LOG(ERROR) << "Could not finish writing file: " << s;
}
}
Status CustomWriter::WriteRecord(const StringPiece& data) {
char header[kHeaderSize];
core::EncodeFixed64(header, data.size());
TF_RETURN_IF_ERROR(dest_->Append(StringPiece(header, sizeof(header))));
return dest_->Append(data);
}
#if defined(TF_CORD_SUPPORT)
Status CustomWriter::WriteRecord(const absl::Cord& data) {
char header[kHeaderSize];
core::EncodeFixed64(header, data.size());
TF_RETURN_IF_ERROR(dest_->Append(StringPiece(header, sizeof(header))));
return dest_->Append(data);
}
#endif
Status Reader::Create(Env* env, const std::string& filename,
const string& compression_type, int version,
const DataTypeVector& dtypes,
std::unique_ptr<Reader>* out_reader) {
switch (version) {
case 0:
case 1:
*out_reader = std::make_unique<CustomReader>(filename, compression_type,
version, dtypes);
break;
case 2:
*out_reader =
std::make_unique<TFRecordReader>(filename, compression_type, dtypes);
break;
default:
return errors::InvalidArgument("Snapshot reader version: ", version,
" is not supported.");
}
return (*out_reader)->Initialize(env);
}
Status Reader::SkipRecords(int64_t num_records) {
for (int i = 0; i < num_records; ++i) {
std::vector<Tensor> unused_tensors;
TF_RETURN_IF_ERROR(ReadTensors(&unused_tensors));
}
return absl::OkStatus();
}
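// Reader::Dataset exposes a single shard directory as a DatasetBase: its
// iterator reads the numbered checkpoint files in sequence, advancing to the
// next file when a read returns kOutOfRange and skipping start_index_
// elements after Initialize/Restore.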
class Reader::Dataset : public DatasetBase {
public:
Dataset(DatasetContext&& ctx, const std::string& shard_dir,
const std::string& compression, const int64_t version,
const DataTypeVector& dtypes,
const std::vector<PartialTensorShape>& shapes,
const int64_t start_index)
: DatasetBase(std::move(ctx)),
shard_dir_(shard_dir),
compression_(compression),
version_(version),
dtypes_(dtypes),
shapes_(shapes),
start_index_(start_index) {}
const DataTypeVector& output_dtypes() const override { return dtypes_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return shapes_;
}
std::string DebugString() const override { return "SnapshotDatasetReader"; }
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** node) const override {
Node* shard_dir = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(shard_dir_, &shard_dir));
Node* start_index = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(start_index_, &start_index));
AttrValue compression;
b->BuildAttrValue(compression_, &compression);
AttrValue version;
b->BuildAttrValue(version_, &version);
return b->AddDataset(
this,
{std::make_pair(0, shard_dir), std::make_pair(1, start_index)},
{},
{{kCompression, compression}, {kVersion, version}},
true, node);
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(node_name(), prefix)});
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
start_index_(dataset()->start_index_) {}
Status Initialize(IteratorContext* ctx) override {
TF_RETURN_IF_ERROR(Reader::Create(
ctx->env(), GetCurrentFilename(), dataset()->compression_,
dataset()->version_, dataset()->dtypes_, &reader_));
return AdvanceToStartIndex(ctx);
}
protected:
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
*end_of_sequence = false;
Status s = reader_->ReadTensors(out_tensors);
if (!absl::IsOutOfRange(s)) {
start_index_++;
return s;
}
Status status = AdvanceToNextFile(ctx->env());
if (absl::IsNotFound(status)) {
*end_of_sequence = true;
return absl::OkStatus();
}
return status;
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(writer->WriteScalar(full_name(kCurrentCheckpointID),
current_checkpoint_id_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name(kStartIndex), start_index_));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kCurrentCheckpointID),
                                          &current_checkpoint_id_));
TF_RETURN_IF_ERROR(
reader->ReadScalar(full_name(kStartIndex), &start_index_));
TF_RETURN_IF_ERROR(ctx->env()->FileExists(GetCurrentFilename()));
TF_RETURN_IF_ERROR(Reader::Create(
ctx->env(), GetCurrentFilename(), dataset()->compression_,
dataset()->version_, dataset()->dtypes_, &reader_));
return AdvanceToStartIndex(ctx);
}
private:
Status AdvanceToNextFile(Env* env) {
start_index_ = 0;
current_checkpoint_id_++;
TF_RETURN_IF_ERROR(env->FileExists(GetCurrentFilename()));
return Reader::Create(env, GetCurrentFilename(), dataset()->compression_,
dataset()->version_, dataset()->dtypes_, &reader_);
}
std::string GetCurrentFilename() {
return GetCheckpointFileName(dataset()->shard_dir_,
current_checkpoint_id_);
}
Status AdvanceToStartIndex(IteratorContext* ctx) {
for (int64_t i = 0; i < start_index_; ++i) {
std::vector<Tensor> unused;
TF_RETURN_IF_ERROR(reader_->ReadTensors(&unused));
}
return absl::OkStatus();
}
std::unique_ptr<Reader> reader_;
int64_t current_checkpoint_id_ = 0;
int64_t start_index_;
};
const tstring shard_dir_;
const std::string compression_;
const int64_t version_;
const DataTypeVector dtypes_;
const std::vector<PartialTensorShape> shapes_;
const int64_t start_index_;
};
Reader::DatasetOp::DatasetOp(OpKernelConstruction* ctx) : DatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kCompression, &compression_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kVersion, &version_));
}
void Reader::DatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase** output) {
tstring shard_dir;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, "shard_dir", &shard_dir));
int64_t start_index;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, "start_index", &start_index));
*output =
new Reader::Dataset(DatasetContext(ctx), shard_dir, compression_,
version_, output_types_, output_shapes_, start_index);
}
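// Reader::NestedDataset yields one DT_VARIANT scalar per underlying
// per-shard dataset, letting the consumer recover each dataset with
// GetDatasetFromVariantTensor and decide how to read the shards.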
class Reader::NestedDataset : public DatasetBase {
public:
explicit NestedDataset(DatasetContext&& ctx,
std::vector<DatasetBase*> datasets)
: DatasetBase(std::move(ctx)), datasets_(datasets) {
dtypes_.push_back(DT_VARIANT);
absl::InlinedVector<int64_t, 1UL> element_dim_sizes;
element_dim_sizes.push_back(1);
partial_shapes_.emplace_back(element_dim_sizes);
}
const DataTypeVector& output_dtypes() const override { return dtypes_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return partial_shapes_;
}
std::string DebugString() const override {
return "SnapshotNestedDatasetReader";
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->clear();
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** node) const override {
std::vector<Node*> input_graph_nodes;
input_graph_nodes.reserve(datasets_.size());
for (const auto& dataset : datasets_) {
Node* input_node;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, dataset, &input_node));
input_graph_nodes.emplace_back(input_node);
}
TF_RETURN_IF_ERROR(
b->AddDataset(this, {},
{std::make_pair(0, input_graph_nodes)},
{}, node));
return absl::OkStatus();
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(node_name(), prefix)});
}
private:
std::vector<DatasetBase*> datasets_;
DataTypeVector dtypes_;
std::vector<PartialTensorShape> partial_shapes_;
class Iterator : public DatasetIterator<NestedDataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<NestedDataset>(params) {}
protected:
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
const int64_t num_datasets = dataset()->datasets_.size();
*end_of_sequence = num_datasets == index_;
if (!*end_of_sequence) {
Tensor tensor(DT_VARIANT, TensorShape({}));
TF_RETURN_IF_ERROR(
StoreDatasetInVariantTensor(dataset()->datasets_[index_], &tensor));
out_tensors->clear();
out_tensors->push_back(std::move(tensor));
index_++;
}
return absl::OkStatus();
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(writer->WriteScalar(full_name(kIndex), index_));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kIndex), &index_));
return absl::OkStatus();
}
private:
int64_t index_ = 0;
};
};
Reader::NestedDatasetOp::NestedDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
void Reader::NestedDatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase** output) {
std::vector<DatasetBase*> inputs;
for (size_t i = 0; i < ctx->num_inputs(); ++i) {
DatasetBase* input;
OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(i), &input));
inputs.push_back(input);
}
*output = new Reader::NestedDataset(DatasetContext(ctx), inputs);
(*output)->Initialize({});
}
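// MakeNestedDataset spreads a global start_index across shards round-robin:
// every shard skips start_index / num_shards elements, shards before the
// remainder boundary skip one more, and the shard list is rotated so that
// reading resumes at the correct shard.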
Status Reader::MakeNestedDataset(Env* env,
const std::vector<std::string>& shard_dirs,
const string& compression_type, int version,
const DataTypeVector& dtypes,
const std::vector<PartialTensorShape>& shapes,
const int64_t start_index,
DatasetBase** output) {
std::vector<DatasetBase*> datasets;
datasets.reserve(shard_dirs.size());
for (int64_t i = 0; i < shard_dirs.size(); ++i) {
int64_t dataset_start_index = start_index / shard_dirs.size();
if (start_index % shard_dirs.size() > datasets.size()) {
dataset_start_index++;
}
datasets.push_back(
new Dataset(DatasetContext(DatasetContext::Params(
{"SnapshotDatasetReader",
strings::StrCat("SnapshotDatasetReader/_", i)})),
shard_dirs.at(i), compression_type, version, dtypes, shapes,
dataset_start_index));
datasets.back()->Initialize({});
}
if (!shard_dirs.empty()) {
std::rotate(datasets.begin(),
datasets.begin() + (start_index % shard_dirs.size()),
datasets.end());
}
MakeNestedDataset(datasets, output);
return absl::OkStatus();
}
void Reader::MakeNestedDataset(const std::vector<DatasetBase*>& datasets,
DatasetBase** output) {
*output = new NestedDataset(
DatasetContext(DatasetContext::Params(
{"SnapshotNestedDatasetReader", "SnapshotNestedDatasetReader"})),
datasets);
(*output)->Initialize({});
}
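// TFRecordReaderImpl reads TensorProto records back out of a TFRecord file,
// tracking the current offset and the number of payload bytes read.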
TFRecordReaderImpl::TFRecordReaderImpl(
const std::string& filename, const string& compression,
std::optional<int64_t> output_buffer_size)
: filename_(filename),
offset_(0),
bytes_read_(0),
compression_(compression),
output_buffer_size_(output_buffer_size) {}
Status TFRecordReaderImpl::Initialize(Env* env) {
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(filename_, &file_));
auto options = io::RecordReaderOptions::CreateRecordReaderOptions(
compression_);
#if !defined(IS_SLIM_BUILD)
if (output_buffer_size_.has_value()) {
options.snappy_options.output_buffer_size = *output_buffer_size_;
options.zlib_options.output_buffer_size = *output_buffer_size_;
}
#endif
record_reader_ = std::make_unique<io::RecordReader>(file_.get(), options);
bytes_read_ = 0;
return absl::OkStatus();
}
absl::StatusOr<Tensor> TFRecordReaderImpl::GetNext() {
tstring record;
TF_RETURN_IF_ERROR(record_reader_->ReadRecord(&offset_, &record));
bytes_read_ += record.size();
return Parse(record);
}
absl::StatusOr<std::vector<Tensor>> TFRecordReaderImpl::GetTensors() {
std::vector<Tensor> tensors;
while (true) {
absl::StatusOr<Tensor> tensor = GetNext();
if (absl::IsOutOfRange(tensor.status())) {
return tensors;
}
TF_RETURN_IF_ERROR(tensor.status());
tensors.push_back(std::move(*tensor));
}
return tensors;
}
absl::StatusOr<Tensor> TFRecordReaderImpl::Parse(const tstring& record) {
TensorProto proto;
if (!proto.ParseFromArray(record.data(), record.size())) {
return errors::DataLoss(
"Unable to parse tensor from stored proto in file: ", filename_,
", record ", offset_, ". Serialized proto: ", record);
}
Tensor tensor;
if (!tensor.FromProto(proto)) {
return errors::DataLoss(
"Unable to parse tensor from stored proto in file: ", filename_,
", record ", offset_, ". TensorProto: ", proto.ShortDebugString());
}
return tensor;
}
Status TFRecordReader::ReadTensors(std::vector<Tensor>* read_tensors) {
read_tensors->clear();
read_tensors->reserve(dtypes_.size());
for (int i = 0; i < dtypes_.size(); ++i) {
TF_ASSIGN_OR_RETURN(Tensor tensor, reader_impl_.GetNext());
read_tensors->push_back(std::move(tensor));
}
return absl::OkStatus();
}
CustomReader::CustomReader(const std::string& filename,
const string& compression_type, const int version,
const DataTypeVector& dtypes)
: filename_(filename),
compression_type_(compression_type),
version_(version),
dtypes_(dtypes) {}
Status CustomReader::Initialize(Env* env) {
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(filename_, &file_));
input_stream_ = std::make_unique<io::RandomAccessInputStream>(file_.get());
#if defined(IS_SLIM_BUILD)
if (compression_type_ != io::compression::kNone) {
LOG(ERROR) << "Compression is unsupported on mobile platforms. Turning "
<< "off compression.";
}
#else
if (compression_type_ == io::compression::kGzip) {
io::ZlibCompressionOptions zlib_options;
zlib_options = io::ZlibCompressionOptions::GZIP();
input_stream_ = std::make_unique<io::ZlibInputStream>(
input_stream_.release(), zlib_options.input_buffer_size,
zlib_options.output_buffer_size, zlib_options, true);
} else if (compression_type_ == io::compression::kSnappy) {
if (version_ == 0) {
input_stream_ = std::make_unique<tsl::io::SnappyInputBuffer>(
file_.get(), kSnappyReaderInputBufferSizeBytes,
kSnappyReaderOutputBufferSizeBytes);
} else {
input_stream_ =
std::make_unique<io::BufferedInputStream>(file_.get(), 64 << 20);
}
}
#endif
simple_tensor_mask_.reserve(dtypes_.size());
for (const auto& dtype : dtypes_) {
if (DataTypeCanUseMemcpy(dtype)) {
simple_tensor_mask_.push_back(true);
num_simple_++;
} else {
simple_tensor_mask_.push_back(false);
num_complex_++;
}
}
return absl::OkStatus();
}
Status CustomReader::ReadTensors(std::vector<Tensor>* read_tensors) {
tsl::profiler::TraceMe activity(
[&]() { return absl::StrCat(kClassName, kSeparator, "ReadTensors"); },
tsl::profiler::TraceMeLevel::kInfo);
if (version_ == 0 || compression_type_ != io::compression::kSnappy) {
return ReadTensorsV0(read_tensors);
}
if (version_ != 1) {
return errors::InvalidArgument("Version: ", version_, " is not supported.");
}
if (compression_type_ != io::compression::kSnappy) {
return errors::InvalidArgument("Compression ", compression_type_,
" is not supported.");
}
experimental::SnapshotTensorMetadata metadata;
tstring metadata_str;
TF_RETURN_IF_ERROR(ReadRecord(&metadata_str));
if (!metadata.ParseFromArray(metadata_str.data(), metadata_str.size())) {
return errors::DataLoss("Could not parse SnapshotTensorMetadata");
}
read_tensors->reserve(metadata.tensor_metadata_size());
std::vector<Tensor> simple_tensors;
simple_tensors.reserve(num_simple_);
std::vector<std::pair<std::unique_ptr<char[]>, size_t>> tensor_proto_strs;
tensor_proto_strs.reserve(num_complex_);
TF_RETURN_IF_ERROR(
SnappyUncompress(&metadata, &simple_tensors, &tensor_proto_strs));
int simple_index = 0;
int complex_index = 0;
for (int i = 0, end = simple_tensor_mask_.size(); i < end; ++i) {
if (simple_tensor_mask_[i]) {
read_tensors->push_back(std::move(simple_tensors[simple_index]));
simple_index++;
} else {
auto tensor_proto_str = std::move(tensor_proto_strs[complex_index].first);
size_t tensor_proto_size = tensor_proto_strs[complex_index].second;
TensorProto tp;
if (!tp.ParseFromArray(tensor_proto_str.get(), tensor_proto_size)) {
return errors::Internal("Could not parse TensorProto");
}
Tensor t;
if (!t.FromProto(tp)) {
return errors::Internal("Could not parse Tensor");
}
read_tensors->push_back(std::move(t));
complex_index++;
}
}
return absl::OkStatus();
}
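// Legacy format: each record is one serialized SnapshotRecord proto that
// contains every tensor of the element.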
Status CustomReader::ReadTensorsV0(std::vector<Tensor>* read_tensors) {
experimental::SnapshotRecord record;
#if defined(PLATFORM_GOOGLE)
absl::Cord c;
TF_RETURN_IF_ERROR(ReadRecord(&c));
record.ParseFromCord(c);
#else
tstring record_bytes;
TF_RETURN_IF_ERROR(ReadRecord(&record_bytes));
record.ParseFromArray(record_bytes.data(), record_bytes.size());
#endif
read_tensors->reserve(record.tensor_size());
for (int i = 0; i < record.tensor_size(); ++i) {
read_tensors->emplace_back();
if (!read_tensors->back().FromProto(record.tensor(i))) {
return errors::DataLoss("Unable to parse tensor from proto.");
}
}
return absl::OkStatus();
}
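// Decompresses the next record directly into its destinations: simple tensors
// decompress in place into freshly allocated tensor buffers, while complex
// tensors land in scratch buffers as serialized TensorProtos. The iovec sizes
// are cross-checked against Snappy's reported uncompressed length.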
Status CustomReader::SnappyUncompress(
const experimental::SnapshotTensorMetadata* metadata,
std::vector<Tensor>* simple_tensors,
std::vector<std::pair<std::unique_ptr<char[]>, size_t>>*
tensor_proto_strs) {
tstring compressed;
TF_RETURN_IF_ERROR(ReadRecord(&compressed));
size_t size;
if (!tsl::port::Snappy_GetUncompressedLength(compressed.data(),
compressed.size(), &size)) {
return errors::Internal("Could not get snappy uncompressed length");
}
int num_tensors = metadata->tensor_metadata_size();
std::vector<tsl::iovec> iov(num_tensors);
int index = 0;
int64_t total_size = 0;
for (int i = 0, end = simple_tensor_mask_.size(); i < end; ++i) {
const auto& tensor_metadata = metadata->tensor_metadata(i);
if (simple_tensor_mask_[i]) {
TensorShape shape(tensor_metadata.tensor_shape());
Tensor simple_tensor(dtypes_[i], shape);
TensorBuffer* buffer = DMAHelper::buffer(&simple_tensor);
iov[index].iov_base = buffer->data();
iov[index].iov_len = buffer->size();
simple_tensors->push_back(std::move(simple_tensor));
} else {
auto tensor_proto_str =
std::make_unique<char[]>(tensor_metadata.tensor_size_bytes());
iov[index].iov_base = tensor_proto_str.get();
iov[index].iov_len = tensor_metadata.tensor_size_bytes();
tensor_proto_strs->push_back(std::make_pair(
std::move(tensor_proto_str), tensor_metadata.tensor_size_bytes()));
}
total_size += iov[index].iov_len;
index++;
}
const int64_t size_int = size;
if (size_int != total_size) {
return errors::Internal("Uncompressed size mismatch. Snappy expects ", size,
" whereas the tensor metadata suggests ",
total_size);
}
if (!tsl::port::Snappy_UncompressToIOVec(compressed.data(), compressed.size(),
iov.data(), num_tensors)) {
return errors::Internal("Failed to perform snappy decompression.");
}
return absl::OkStatus();
}
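// Records are framed as a little-endian fixed64 length header followed by
// that many payload bytes.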
Status CustomReader::ReadRecord(tstring* record) {
tstring header;
TF_RETURN_IF_ERROR(input_stream_->ReadNBytes(kHeaderSize, &header));
uint64 length = core::DecodeFixed64(header.data());
return input_stream_->ReadNBytes(length, record);
}
#if defined(TF_CORD_SUPPORT)
Status CustomReader::ReadRecord(absl::Cord* record) {
tstring header;
TF_RETURN_IF_ERROR(input_stream_->ReadNBytes(kHeaderSize, &header));
uint64 length = core::DecodeFixed64(header.data());
if (compression_type_ == io::compression::kNone) {
return input_stream_->ReadNBytes(length, record);
} else {
auto tmp_str = new tstring();
TF_RETURN_IF_ERROR(input_stream_->ReadNBytes(length, tmp_str));
absl::string_view tmp_str_view(*tmp_str);
record->Append(absl::MakeCordFromExternal(
tmp_str_view, [tmp_str](absl::string_view) { delete tmp_str; }));
return absl::OkStatus();
}
}
#endif
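// Writes the metadata proto to a uniquely named temporary file and then
// renames it into place, so readers never observe a partially written
// metadata file.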
Status WriteMetadataFile(Env* env, const string& dir,
const experimental::SnapshotMetadataRecord* metadata) {
string metadata_filename = io::JoinPath(dir, kMetadataFilename);
TF_RETURN_IF_ERROR(env->RecursivelyCreateDir(dir));
std::string tmp_filename =
absl::StrCat(metadata_filename, "-tmp-", random::New64());
TF_RETURN_IF_ERROR(WriteBinaryProto(env, tmp_filename, *metadata));
return env->RenameFile(tmp_filename, metadata_filename);
}
Status WriteMetadataFile(
Env* env, const string& dir,
const experimental::DistributedSnapshotMetadata* metadata) {
string metadata_filename = io::JoinPath(dir, kMetadataFilename);
TF_RETURN_IF_ERROR(env->RecursivelyCreateDir(dir));
std::string tmp_filename =
absl::StrCat(metadata_filename, "-tmp-", random::New64());
TF_RETURN_IF_ERROR(WriteBinaryProto(env, tmp_filename, *metadata));
return env->RenameFile(tmp_filename, metadata_filename);
}
Status ReadMetadataFile(Env* env, const string& dir,
experimental::SnapshotMetadataRecord* metadata,
bool* file_exists) {
string metadata_filename = io::JoinPath(dir, kMetadataFilename);
Status s = env->FileExists(metadata_filename);
*file_exists = s.ok();
if (*file_exists) {
return ReadBinaryProto(env, metadata_filename, metadata);
} else {
return absl::OkStatus();
}
}
Status ReadMetadataFile(Env* env, const string& dir,
experimental::DistributedSnapshotMetadata* metadata,
bool* file_exists) {
string metadata_filename = io::JoinPath(dir, kMetadataFilename);
Status s = env->FileExists(metadata_filename);
*file_exists = s.ok();
if (*file_exists) {
return ReadBinaryProto(env, metadata_filename, metadata);
} else {
return absl::OkStatus();
}
}
Status DumpDatasetGraph(Env* env, const std::string& path, uint64 hash,
const GraphDef* graph) {
std::string hash_hex =
strings::StrCat(strings::Hex(hash, strings::kZeroPad16));
std::string graph_file =
io::JoinPath(path, absl::StrCat(hash_hex, "-graph.pbtxt"));
LOG(INFO) << "Graph hash is " << hash_hex << ", writing to " << graph_file;
TF_RETURN_IF_ERROR(env->RecursivelyCreateDir(path));
return WriteTextProto(env, graph_file, *graph);
}
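// Resolves the effective snapshot mode: an explicit mode string wins;
// otherwise the decision falls back to whether metadata exists, whether the
// snapshot is finalized, and whether a pending snapshot has expired.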
Status DetermineOpState(const std::string& mode_string, bool file_exists,
const experimental::SnapshotMetadataRecord* metadata,
const uint64 pending_snapshot_expiry_seconds,
Mode* mode) {
if (mode_string == kModeRead) {
if (!file_exists) {
return errors::NotFound("Metadata file does not exist.");
}
LOG(INFO) << "Overriding mode to reader.";
*mode = READER;
return absl::OkStatus();
}
if (mode_string == kModeWrite) {
LOG(INFO) << "Overriding mode to writer.";
*mode = WRITER;
return absl::OkStatus();
}
if (mode_string == kModePassthrough) {
LOG(INFO) << "Overriding mode to passthrough.";
*mode = PASSTHROUGH;
return absl::OkStatus();
}
if (!file_exists) {
*mode = WRITER;
return absl::OkStatus();
}
if (metadata->finalized()) {
*mode = READER;
return absl::OkStatus();
}
int64_t expiration_timer = static_cast<int64_t>(EnvTime::NowMicros()) -
pending_snapshot_expiry_seconds * 1000000;
if (metadata->creation_timestamp() >= expiration_timer) {
*mode = PASSTHROUGH;
return absl::OkStatus();
} else {
*mode = WRITER;
return absl::OkStatus();
}
}
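// Spawns the background writer thread immediately. Note that output_types is
// captured by reference, so the caller's vector must remain alive until the
// thread has started and copied it.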
AsyncWriter::AsyncWriter(Env* env, int64_t file_index,
const std::string& shard_directory,
uint64 checkpoint_id, const std::string& compression,
int64_t version, const DataTypeVector& output_types,
std::function<void(Status)> done) {
thread_ = absl::WrapUnique(env->StartThread(
ThreadOptions(), absl::StrCat("writer_thread_", file_index),
[this, env, shard_directory, checkpoint_id, compression, version,
&output_types, done = std::move(done)] {
done(WriterThread(env, shard_directory, checkpoint_id, compression,
version, output_types));
}));
}
void AsyncWriter::Write(const std::vector<Tensor>& tensors) {
mutex_lock l(mu_);
ElementOrEOF element;
element.value = tensors;
deque_.push_back(std::move(element));
}
void AsyncWriter::SignalEOF() {
mutex_lock l(mu_);
ElementOrEOF be;
be.end_of_sequence = true;
deque_.push_back(std::move(be));
}
void AsyncWriter::Consume(ElementOrEOF* be) {
mutex_lock l(mu_);
mu_.Await(tensorflow::Condition(this, &AsyncWriter::ElementAvailable));
*be = deque_.front();
deque_.pop_front();
}
bool AsyncWriter::ElementAvailable() { return !deque_.empty(); }
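// Runs on the background thread: creates the shard writer, then drains the
// queue, writing elements until the end-of-sequence marker arrives.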
Status AsyncWriter::WriterThread(Env* env, const std::string& shard_directory,
uint64 checkpoint_id,
const std::string& compression,
int64_t version, DataTypeVector output_types) {
std::unique_ptr<snapshot_util::Writer> writer;
TF_RETURN_IF_ERROR(env->RecursivelyCreateDir(shard_directory));
TF_RETURN_IF_ERROR(snapshot_util::Writer::Create(
env, GetCheckpointFileName(shard_directory, checkpoint_id), compression,
version, std::move(output_types), &writer));
while (true) {
ElementOrEOF be;
Consume(&be);
if (be.end_of_sequence) {
TF_RETURN_IF_ERROR(writer->Close());
break;
}
TF_RETURN_IF_ERROR(writer->WriteTensors(be.value));
}
return absl::OkStatus();
}
namespace {
REGISTER_KERNEL_BUILDER(Name("SnapshotDatasetReader").Device(DEVICE_CPU),
Reader::DatasetOp);
REGISTER_KERNEL_BUILDER(Name("SnapshotNestedDatasetReader").Device(DEVICE_CPU),
Reader::NestedDatasetOp);
}
}
}
} | #include "tensorflow/core/data/snapshot_utils.h"
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/compression.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace data {
namespace snapshot_util {
namespace {
using ::tensorflow::data::testing::EqualsProto;
using ::tensorflow::data::testing::LocalTempFilename;
void GenerateTensorVector(tensorflow::DataTypeVector& dtypes,
std::vector<Tensor>& tensors) {
std::string tensor_data(1024, 'a');
for (int i = 0; i < 10; ++i) {
Tensor t(tensor_data.data());
dtypes.push_back(t.dtype());
tensors.push_back(t);
}
}
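// Writes 100 copies of a generated element, reads them all back, and compares
// the serialized TensorProto forms byte for byte.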
void SnapshotRoundTrip(std::string compression_type, int version) {
std::vector<Tensor> tensors;
tensorflow::DataTypeVector dtypes;
GenerateTensorVector(dtypes, tensors);
std::string filename;
EXPECT_TRUE(Env::Default()->LocalTempFilename(&filename));
std::unique_ptr<Writer> writer;
TF_ASSERT_OK(Writer::Create(tensorflow::Env::Default(), filename,
compression_type, version, dtypes, &writer));
for (int i = 0; i < 100; ++i) {
TF_ASSERT_OK(writer->WriteTensors(tensors));
}
TF_ASSERT_OK(writer->Close());
std::unique_ptr<Reader> reader;
TF_ASSERT_OK(Reader::Create(Env::Default(), filename, compression_type,
version, dtypes, &reader));
for (int i = 0; i < 100; ++i) {
std::vector<Tensor> read_tensors;
TF_ASSERT_OK(reader->ReadTensors(&read_tensors));
EXPECT_EQ(tensors.size(), read_tensors.size());
for (int j = 0; j < read_tensors.size(); ++j) {
TensorProto proto;
TensorProto read_proto;
tensors[j].AsProtoTensorContent(&proto);
read_tensors[j].AsProtoTensorContent(&read_proto);
std::string proto_serialized, read_proto_serialized;
proto.AppendToString(&proto_serialized);
read_proto.AppendToString(&read_proto_serialized);
EXPECT_EQ(proto_serialized, read_proto_serialized);
}
}
TF_ASSERT_OK(Env::Default()->DeleteFile(filename));
}
TEST(SnapshotUtilTest, CombinationRoundTripTest) {
SnapshotRoundTrip(io::compression::kNone, 1);
SnapshotRoundTrip(io::compression::kGzip, 1);
SnapshotRoundTrip(io::compression::kSnappy, 1);
SnapshotRoundTrip(io::compression::kNone, 2);
SnapshotRoundTrip(io::compression::kGzip, 2);
SnapshotRoundTrip(io::compression::kSnappy, 2);
}
TEST(SnapshotUtilTest, MetadataFileRoundTrip) {
experimental::DistributedSnapshotMetadata metadata_in;
metadata_in.set_compression(io::compression::kGzip);
std::string dir = LocalTempFilename();
TF_ASSERT_OK(WriteMetadataFile(Env::Default(), dir, &metadata_in));
experimental::DistributedSnapshotMetadata metadata_out;
bool file_exists;
TF_ASSERT_OK(
ReadMetadataFile(Env::Default(), dir, &metadata_out, &file_exists));
EXPECT_THAT(metadata_in, EqualsProto(metadata_out));
}
TEST(SnapshotUtilTest, MetadataFileDoesntExist) {
experimental::DistributedSnapshotMetadata metadata;
bool file_exists;
TF_ASSERT_OK(ReadMetadataFile(Env::Default(), LocalTempFilename(), &metadata,
&file_exists));
EXPECT_FALSE(file_exists);
}
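// Benchmark body: both the write pass and the read pass iterate the benchmark
// state, and per-iteration statuses are deliberately ignored.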
void SnapshotReaderBenchmarkLoop(::testing::benchmark::State& state,
std::string compression_type, int version) {
tensorflow::DataTypeVector dtypes;
std::vector<Tensor> tensors;
GenerateTensorVector(dtypes, tensors);
std::string filename;
EXPECT_TRUE(Env::Default()->LocalTempFilename(&filename));
std::unique_ptr<Writer> writer;
TF_ASSERT_OK(Writer::Create(tensorflow::Env::Default(), filename,
compression_type, version, dtypes, &writer));
for (auto s : state) {
writer->WriteTensors(tensors).IgnoreError();
}
TF_ASSERT_OK(writer->Close());
std::unique_ptr<Reader> reader;
TF_ASSERT_OK(Reader::Create(Env::Default(), filename, compression_type,
version, dtypes, &reader));
for (auto s : state) {
std::vector<Tensor> read_tensors;
reader->ReadTensors(&read_tensors).IgnoreError();
}
TF_ASSERT_OK(Env::Default()->DeleteFile(filename));
}
void SnapshotCustomReaderNoneBenchmark(::testing::benchmark::State& state) {
SnapshotReaderBenchmarkLoop(state, io::compression::kNone, 1);
}
void SnapshotCustomReaderGzipBenchmark(::testing::benchmark::State& state) {
SnapshotReaderBenchmarkLoop(state, io::compression::kGzip, 1);
}
void SnapshotCustomReaderSnappyBenchmark(::testing::benchmark::State& state) {
SnapshotReaderBenchmarkLoop(state, io::compression::kSnappy, 1);
}
void SnapshotTFRecordReaderNoneBenchmark(::testing::benchmark::State& state) {
SnapshotReaderBenchmarkLoop(state, io::compression::kNone, 2);
}
void SnapshotTFRecordReaderGzipBenchmark(::testing::benchmark::State& state) {
SnapshotReaderBenchmarkLoop(state, io::compression::kGzip, 2);
}
BENCHMARK(SnapshotCustomReaderNoneBenchmark);
BENCHMARK(SnapshotCustomReaderGzipBenchmark);
BENCHMARK(SnapshotCustomReaderSnappyBenchmark);
BENCHMARK(SnapshotTFRecordReaderNoneBenchmark);
BENCHMARK(SnapshotTFRecordReaderGzipBenchmark);
void SnapshotWriterBenchmarkLoop(::testing::benchmark::State& state,
std::string compression_type, int version) {
tensorflow::DataTypeVector dtypes;
std::vector<Tensor> tensors;
GenerateTensorVector(dtypes, tensors);
std::string filename;
EXPECT_TRUE(Env::Default()->LocalTempFilename(&filename));
std::unique_ptr<Writer> writer;
TF_ASSERT_OK(Writer::Create(tensorflow::Env::Default(), filename,
compression_type, version, dtypes, &writer));
for (auto s : state) {
writer->WriteTensors(tensors).IgnoreError();
}
writer->Close().IgnoreError();
TF_ASSERT_OK(Env::Default()->DeleteFile(filename));
}
void SnapshotCustomWriterNoneBenchmark(::testing::benchmark::State& state) {
SnapshotWriterBenchmarkLoop(state, io::compression::kNone, 1);
}
void SnapshotCustomWriterGzipBenchmark(::testing::benchmark::State& state) {
SnapshotWriterBenchmarkLoop(state, io::compression::kGzip, 1);
}
void SnapshotCustomWriterSnappyBenchmark(::testing::benchmark::State& state) {
SnapshotWriterBenchmarkLoop(state, io::compression::kSnappy, 1);
}
void SnapshotTFRecordWriterNoneBenchmark(::testing::benchmark::State& state) {
SnapshotWriterBenchmarkLoop(state, io::compression::kNone, 2);
}
void SnapshotTFRecordWriterGzipBenchmark(::testing::benchmark::State& state) {
SnapshotWriterBenchmarkLoop(state, io::compression::kGzip, 2);
}
void SnapshotTFRecordWriterSnappyBenchmark(::testing::benchmark::State& state) {
SnapshotWriterBenchmarkLoop(state, io::compression::kSnappy, 2);
}
BENCHMARK(SnapshotCustomWriterNoneBenchmark);
BENCHMARK(SnapshotCustomWriterGzipBenchmark);
BENCHMARK(SnapshotCustomWriterSnappyBenchmark);
BENCHMARK(SnapshotTFRecordWriterNoneBenchmark);
BENCHMARK(SnapshotTFRecordWriterGzipBenchmark);
BENCHMARK(SnapshotTFRecordWriterSnappyBenchmark);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/snapshot_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/snapshot_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1c4bfa72-7d6e-4c9a-b6e8-32a2e923bbb6 | cpp | tensorflow/tensorflow | nnapi_delegate_c_api | tensorflow/lite/delegates/nnapi/nnapi_delegate_c_api.cc | tensorflow/lite/delegates/nnapi/nnapi_delegate_c_api_test.cc | #include "tensorflow/lite/delegates/nnapi/nnapi_delegate_c_api.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include "tensorflow/lite/nnapi/sl/public/NeuralNetworksSupportLibraryImpl.h"
TfLiteDelegate* TfLiteNnapiDelegateCreate(
const TfLiteNnapiDelegateOptions* options) {
  tflite::StatefulNnApiDelegate::Options internal_options;
  internal_options.execution_preference =
      static_cast<tflite::StatefulNnApiDelegate::Options::ExecutionPreference>(
          options->execution_preference);
internal_options.accelerator_name = options->accelerator_name;
internal_options.cache_dir = options->cache_dir;
internal_options.model_token = options->model_token;
internal_options.disallow_nnapi_cpu = options->disallow_nnapi_cpu;
internal_options.max_number_delegated_partitions =
options->max_number_delegated_partitions;
internal_options.allow_fp16 = options->allow_fp16;
tflite::StatefulNnApiDelegate* delegate = nullptr;
if (options->nnapi_support_library_handle) {
delegate = new tflite::StatefulNnApiDelegate(
static_cast<NnApiSLDriverImplFL5*>(
options->nnapi_support_library_handle),
internal_options);
} else {
delegate = new tflite::StatefulNnApiDelegate(internal_options);
}
return delegate;
}
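// Mirrors the defaults of StatefulNnApiDelegate::Options back into the
// C-visible struct.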
TfLiteNnapiDelegateOptions TfLiteNnapiDelegateOptionsDefault() {
TfLiteNnapiDelegateOptions result = {};
tflite::StatefulNnApiDelegate::Options options;
result.execution_preference =
static_cast<TfLiteNnapiDelegateOptions::ExecutionPreference>(
options.execution_preference);
result.accelerator_name = options.accelerator_name;
result.cache_dir = options.cache_dir;
result.model_token = options.model_token;
result.disallow_nnapi_cpu = options.disallow_nnapi_cpu;
result.max_number_delegated_partitions =
options.max_number_delegated_partitions;
result.allow_fp16 = options.allow_fp16;
result.nnapi_support_library_handle = nullptr;
return result;
}
void TfLiteNnapiDelegateDelete(TfLiteDelegate* delegate) {
if (delegate == nullptr) return;
delete static_cast<tflite::StatefulNnApiDelegate*>(delegate);
} | #include "tensorflow/lite/delegates/nnapi/nnapi_delegate_c_api.h"
#include <sys/mman.h>
#include <algorithm>
#include <initializer_list>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
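// Test fixture that owns a C-API NNAPI delegate and applies it to a
// single-op model before the interpreter is built.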
class SingleOpModelWithNnapiDelegateCApi : public SingleOpModel {
public:
SingleOpModelWithNnapiDelegateCApi() {
options_ = TfLiteNnapiDelegateOptionsDefault();
options_.disallow_nnapi_cpu = false;
}
explicit SingleOpModelWithNnapiDelegateCApi(
const TfLiteNnapiDelegateOptions& options) {
options_ = options;
options_.disallow_nnapi_cpu = false;
}
~SingleOpModelWithNnapiDelegateCApi() {
if (nnapi_delegate_) {
TfLiteNnapiDelegateDelete(nnapi_delegate_);
}
nnapi_delegate_ = nullptr;
}
protected:
void BuildInterpreterWithNNAPI(std::vector<std::vector<int>> input_shapes) {
if (nnapi_delegate_) {
TfLiteNnapiDelegateDelete(nnapi_delegate_);
}
nnapi_delegate_ = TfLiteNnapiDelegateCreate(&options_);
SetDelegate(nnapi_delegate_);
    BuildInterpreter(input_shapes, /*num_threads=*/-1, options_.allow_fp16,
                     /*apply_delegate=*/true, /*allocate_and_delegate=*/true);
}
private:
TfLiteNnapiDelegateOptions options_;
TfLiteDelegate* nnapi_delegate_ = nullptr;
};
class FloatAddOpModel : public SingleOpModelWithNnapiDelegateCApi {
public:
FloatAddOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output,
ActivationFunctionType activation_type) {
Init(input1, input2, output, activation_type);
}
FloatAddOpModel(const TfLiteNnapiDelegateOptions& options,
const TensorData& input1, const TensorData& input2,
const TensorData& output,
ActivationFunctionType activation_type)
: SingleOpModelWithNnapiDelegateCApi(options) {
Init(input1, input2, output, activation_type);
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input1_;
int input2_;
int output_;
private:
void Init(const TensorData& input1, const TensorData& input2,
const TensorData& output, ActivationFunctionType activation_type) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
CreateAddOptions(builder_, activation_type).Union());
BuildInterpreterWithNNAPI({GetShape(input1_), GetShape(input2_)});
}
};
TEST(NNAPIDelegate, C_API) {
TfLiteNnapiDelegateOptions options = TfLiteNnapiDelegateOptionsDefault();
options.execution_preference =
TfLiteNnapiDelegateOptions::ExecutionPreference::kLowPower;
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, C_API_WithAcceleratorName) {
TfLiteNnapiDelegateOptions options = TfLiteNnapiDelegateOptionsDefault();
options.execution_preference =
TfLiteNnapiDelegateOptions::ExecutionPreference::kLowPower;
options.accelerator_name = "nnapi-reference";
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, C_API_WithCompilationCaching) {
TfLiteNnapiDelegateOptions options = TfLiteNnapiDelegateOptionsDefault();
options.execution_preference =
TfLiteNnapiDelegateOptions::ExecutionPreference::kLowPower;
options.cache_dir = "/data/local/tmp";
options.model_token = "NNAPIDelegate.C_API_WithCompilationCaching";
{
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
{
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-1.0, 0.1, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.2, 0.2, 0.4, 0.2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-0.8, 0.3, 1.1, 1.0}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/nnapi/nnapi_delegate_c_api.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/nnapi/nnapi_delegate_c_api_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3a728add-5ac0-4d23-a38f-13f2c4ccb9dd | cpp | google/cel-cpp | cel_expression_builder_flat_impl | eval/compiler/cel_expression_builder_flat_impl.cc | eval/compiler/cel_expression_builder_flat_impl_test.cc | #include "eval/compiler/cel_expression_builder_flat_impl.h"
#include <memory>
#include <utility>
#include <vector>
#include "google/api/expr/v1alpha1/checked.pb.h"
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "absl/base/macros.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "base/ast.h"
#include "common/native_type.h"
#include "eval/eval/cel_expression_flat_impl.h"
#include "eval/eval/direct_expression_step.h"
#include "eval/eval/evaluator_core.h"
#include "eval/public/cel_expression.h"
#include "extensions/protobuf/ast_converters.h"
#include "internal/status_macros.h"
#include "runtime/runtime_issue.h"
namespace google::api::expr::runtime {
using ::cel::Ast;
using ::cel::RuntimeIssue;
using ::google::api::expr::v1alpha1::CheckedExpr;
using ::google::api::expr::v1alpha1::Expr;
using ::google::api::expr::v1alpha1::SourceInfo;
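// The four CreateExpression overloads below funnel into CreateExpressionImpl
// after converting the input proto to the internal AST representation.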
absl::StatusOr<std::unique_ptr<CelExpression>>
CelExpressionBuilderFlatImpl::CreateExpression(
const Expr* expr, const SourceInfo* source_info,
std::vector<absl::Status>* warnings) const {
ABSL_ASSERT(expr != nullptr);
CEL_ASSIGN_OR_RETURN(
std::unique_ptr<Ast> converted_ast,
cel::extensions::CreateAstFromParsedExpr(*expr, source_info));
return CreateExpressionImpl(std::move(converted_ast), warnings);
}
absl::StatusOr<std::unique_ptr<CelExpression>>
CelExpressionBuilderFlatImpl::CreateExpression(
const Expr* expr, const SourceInfo* source_info) const {
  return CreateExpression(expr, source_info, /*warnings=*/nullptr);
}
absl::StatusOr<std::unique_ptr<CelExpression>>
CelExpressionBuilderFlatImpl::CreateExpression(
const CheckedExpr* checked_expr,
std::vector<absl::Status>* warnings) const {
ABSL_ASSERT(checked_expr != nullptr);
CEL_ASSIGN_OR_RETURN(
std::unique_ptr<Ast> converted_ast,
cel::extensions::CreateAstFromCheckedExpr(*checked_expr));
return CreateExpressionImpl(std::move(converted_ast), warnings);
}
absl::StatusOr<std::unique_ptr<CelExpression>>
CelExpressionBuilderFlatImpl::CreateExpression(
const CheckedExpr* checked_expr) const {
  return CreateExpression(checked_expr, /*warnings=*/nullptr);
}
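// Builds the flat evaluation plan, converts any collected RuntimeIssues into
// warning statuses, and, when recursive planning reduced the program to a
// single wrapped direct step at the root, returns the recursive
// implementation instead of the stack-machine one.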
absl::StatusOr<std::unique_ptr<CelExpression>>
CelExpressionBuilderFlatImpl::CreateExpressionImpl(
std::unique_ptr<Ast> converted_ast,
std::vector<absl::Status>* warnings) const {
std::vector<RuntimeIssue> issues;
auto* issues_ptr = (warnings != nullptr) ? &issues : nullptr;
CEL_ASSIGN_OR_RETURN(FlatExpression impl,
flat_expr_builder_.CreateExpressionImpl(
std::move(converted_ast), issues_ptr));
if (issues_ptr != nullptr) {
for (const auto& issue : issues) {
warnings->push_back(issue.ToStatus());
}
}
if (flat_expr_builder_.options().max_recursion_depth != 0 &&
!impl.subexpressions().empty() &&
impl.subexpressions().front().size() == 1 &&
impl.subexpressions().front().front()->GetNativeTypeId() ==
cel::NativeTypeId::For<WrappedDirectStep>()) {
return CelExpressionRecursiveImpl::Create(std::move(impl));
}
return std::make_unique<CelExpressionFlatImpl>(std::move(impl));
}
} | #include "eval/compiler/cel_expression_builder_flat_impl.h"
#include <cstdint>
#include <iterator>
#include <memory>
#include <string>
#include <vector>
#include "google/api/expr/v1alpha1/checked.pb.h"
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "eval/compiler/constant_folding.h"
#include "eval/compiler/regex_precompilation_optimization.h"
#include "eval/eval/cel_expression_flat_impl.h"
#include "eval/public/activation.h"
#include "eval/public/builtin_func_registrar.h"
#include "eval/public/cel_expression.h"
#include "eval/public/cel_function.h"
#include "eval/public/cel_value.h"
#include "eval/public/containers/container_backed_map_impl.h"
#include "eval/public/portable_cel_function_adapter.h"
#include "eval/public/structs/cel_proto_wrapper.h"
#include "eval/public/structs/protobuf_descriptor_type_provider.h"
#include "eval/public/testing/matchers.h"
#include "extensions/bindings_ext.h"
#include "extensions/protobuf/memory_manager.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
#include "parser/macro.h"
#include "parser/parser.h"
#include "runtime/runtime_options.h"
#include "proto/test/v1/proto3/test_all_types.pb.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
namespace google::api::expr::runtime {
namespace {
using ::absl_testing::StatusIs;
using ::google::api::expr::v1alpha1::CheckedExpr;
using ::google::api::expr::v1alpha1::Expr;
using ::google::api::expr::v1alpha1::ParsedExpr;
using ::google::api::expr::v1alpha1::SourceInfo;
using ::google::api::expr::parser::Macro;
using ::google::api::expr::parser::Parse;
using ::google::api::expr::parser::ParseWithMacros;
using ::google::api::expr::test::v1::proto3::NestedTestAllTypes;
using ::google::api::expr::test::v1::proto3::TestAllTypes;
using ::testing::_;
using ::testing::Contains;
using ::testing::HasSubstr;
using ::testing::IsNull;
using ::testing::NotNull;
TEST(CelExpressionBuilderFlatImplTest, Error) {
Expr expr;
SourceInfo source_info;
CelExpressionBuilderFlatImpl builder;
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Invalid empty expression")));
}
TEST(CelExpressionBuilderFlatImplTest, ParsedExpr) {
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, Parse("1 + 2"));
CelExpressionBuilderFlatImpl builder;
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelExpression> plan,
builder.CreateExpression(&parsed_expr.expr(),
&parsed_expr.source_info()));
Activation activation;
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result, plan->Evaluate(activation, &arena));
EXPECT_THAT(result, test::IsCelInt64(3));
}
struct RecursiveTestCase {
std::string test_name;
std::string expr;
test::CelValueMatcher matcher;
};
class RecursivePlanTest : public ::testing::TestWithParam<RecursiveTestCase> {
protected:
absl::Status SetupBuilder(CelExpressionBuilderFlatImpl& builder) {
builder.GetTypeRegistry()->RegisterTypeProvider(
std::make_unique<ProtobufDescriptorProvider>(
google::protobuf::DescriptorPool::generated_pool(),
google::protobuf::MessageFactory::generated_factory()));
builder.GetTypeRegistry()->RegisterEnum("TestEnum",
{{"FOO", 1}, {"BAR", 2}});
CEL_RETURN_IF_ERROR(RegisterBuiltinFunctions(builder.GetRegistry()));
    return builder.GetRegistry()->RegisterLazyFunction(CelFunctionDescriptor(
        "LazilyBoundMult", /*receiver_style=*/false,
        {CelValue::Type::kInt64, CelValue::Type::kInt64}));
}
absl::Status SetupActivation(Activation& activation, google::protobuf::Arena* arena) {
activation.InsertValue("int_1", CelValue::CreateInt64(1));
activation.InsertValue("string_abc", CelValue::CreateStringView("abc"));
activation.InsertValue("string_def", CelValue::CreateStringView("def"));
auto* map = google::protobuf::Arena::Create<CelMapBuilder>(arena);
CEL_RETURN_IF_ERROR(
map->Add(CelValue::CreateStringView("a"), CelValue::CreateInt64(1)));
CEL_RETURN_IF_ERROR(
map->Add(CelValue::CreateStringView("b"), CelValue::CreateInt64(2)));
activation.InsertValue("map_var", CelValue::CreateMap(map));
auto* msg = google::protobuf::Arena::Create<NestedTestAllTypes>(arena);
msg->mutable_child()->mutable_payload()->set_single_int64(42);
activation.InsertValue("struct_var",
CelProtoWrapper::CreateMessage(msg, arena));
activation.InsertValue("TestEnum.BAR", CelValue::CreateInt64(-1));
    CEL_RETURN_IF_ERROR(activation.InsertFunction(
        PortableBinaryFunctionAdapter<int64_t, int64_t, int64_t>::Create(
            "LazilyBoundMult", /*receiver_style=*/false,
            [](google::protobuf::Arena*, int64_t lhs, int64_t rhs) -> int64_t {
              return lhs * rhs;
            })));
return absl::OkStatus();
}
};
absl::StatusOr<ParsedExpr> ParseWithBind(absl::string_view cel) {
static const std::vector<Macro>* kMacros = []() {
auto* result = new std::vector<Macro>(Macro::AllMacros());
absl::c_copy(cel::extensions::bindings_macros(),
std::back_inserter(*result));
return result;
}();
return ParseWithMacros(cel, *kMacros, "<input>");
}
TEST_P(RecursivePlanTest, ParsedExprRecursiveImpl) {
const RecursiveTestCase& test_case = GetParam();
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, ParseWithBind(test_case.expr));
cel::RuntimeOptions options;
options.container = "google.api.expr.test.v1.proto3";
google::protobuf::Arena arena;
options.max_recursion_depth = -1;
CelExpressionBuilderFlatImpl builder(options);
ASSERT_OK(SetupBuilder(builder));
ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelExpression> plan,
builder.CreateExpression(&parsed_expr.expr(),
&parsed_expr.source_info()));
EXPECT_THAT(dynamic_cast<const CelExpressionRecursiveImpl*>(plan.get()),
NotNull());
Activation activation;
ASSERT_OK(SetupActivation(activation, &arena));
ASSERT_OK_AND_ASSIGN(CelValue result, plan->Evaluate(activation, &arena));
EXPECT_THAT(result, test_case.matcher);
}
TEST_P(RecursivePlanTest, ParsedExprRecursiveOptimizedImpl) {
const RecursiveTestCase& test_case = GetParam();
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, ParseWithBind(test_case.expr));
cel::RuntimeOptions options;
options.container = "google.api.expr.test.v1.proto3";
google::protobuf::Arena arena;
options.max_recursion_depth = -1;
options.enable_comprehension_list_append = true;
CelExpressionBuilderFlatImpl builder(options);
ASSERT_OK(SetupBuilder(builder));
builder.flat_expr_builder().AddProgramOptimizer(
cel::runtime_internal::CreateConstantFoldingOptimizer(
cel::extensions::ProtoMemoryManagerRef(&arena)));
builder.flat_expr_builder().AddProgramOptimizer(
CreateRegexPrecompilationExtension(options.regex_max_program_size));
ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelExpression> plan,
builder.CreateExpression(&parsed_expr.expr(),
&parsed_expr.source_info()));
EXPECT_THAT(dynamic_cast<const CelExpressionRecursiveImpl*>(plan.get()),
NotNull());
Activation activation;
ASSERT_OK(SetupActivation(activation, &arena));
ASSERT_OK_AND_ASSIGN(CelValue result, plan->Evaluate(activation, &arena));
EXPECT_THAT(result, test_case.matcher);
}
TEST_P(RecursivePlanTest, ParsedExprRecursiveTraceSupport) {
const RecursiveTestCase& test_case = GetParam();
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, ParseWithBind(test_case.expr));
cel::RuntimeOptions options;
options.container = "google.api.expr.test.v1.proto3";
google::protobuf::Arena arena;
auto cb = [](int64_t id, const CelValue& value, google::protobuf::Arena* arena) {
return absl::OkStatus();
};
options.max_recursion_depth = -1;
options.enable_recursive_tracing = true;
CelExpressionBuilderFlatImpl builder(options);
ASSERT_OK(SetupBuilder(builder));
ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelExpression> plan,
builder.CreateExpression(&parsed_expr.expr(),
&parsed_expr.source_info()));
EXPECT_THAT(dynamic_cast<const CelExpressionRecursiveImpl*>(plan.get()),
NotNull());
Activation activation;
ASSERT_OK(SetupActivation(activation, &arena));
ASSERT_OK_AND_ASSIGN(CelValue result, plan->Trace(activation, &arena, cb));
EXPECT_THAT(result, test_case.matcher);
}
TEST_P(RecursivePlanTest, Disabled) {
google::protobuf::LinkMessageReflection<TestAllTypes>();
const RecursiveTestCase& test_case = GetParam();
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, ParseWithBind(test_case.expr));
cel::RuntimeOptions options;
options.container = "google.api.expr.test.v1.proto3";
google::protobuf::Arena arena;
options.max_recursion_depth = 0;
CelExpressionBuilderFlatImpl builder(options);
ASSERT_OK(SetupBuilder(builder));
ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelExpression> plan,
builder.CreateExpression(&parsed_expr.expr(),
&parsed_expr.source_info()));
EXPECT_THAT(dynamic_cast<const CelExpressionRecursiveImpl*>(plan.get()),
IsNull());
Activation activation;
ASSERT_OK(SetupActivation(activation, &arena));
ASSERT_OK_AND_ASSIGN(CelValue result, plan->Evaluate(activation, &arena));
EXPECT_THAT(result, test_case.matcher);
}
INSTANTIATE_TEST_SUITE_P(
RecursivePlanTest, RecursivePlanTest,
testing::ValuesIn(std::vector<RecursiveTestCase>{
{"constant", "'abc'", test::IsCelString("abc")},
{"call", "1 + 2", test::IsCelInt64(3)},
{"nested_call", "1 + 1 + 1 + 1", test::IsCelInt64(4)},
{"and", "true && false", test::IsCelBool(false)},
{"or", "true || false", test::IsCelBool(true)},
{"ternary", "(true || false) ? 2 + 2 : 3 + 3", test::IsCelInt64(4)},
{"create_list", "3 in [1, 2, 3]", test::IsCelBool(true)},
{"create_list_complex", "3 in [2 / 2, 4 / 2, 6 / 2]",
test::IsCelBool(true)},
{"ident", "int_1 == 1", test::IsCelBool(true)},
{"ident_complex", "int_1 + 2 > 4 ? string_abc : string_def",
test::IsCelString("def")},
{"select", "struct_var.child.payload.single_int64",
test::IsCelInt64(42)},
{"nested_select", "[map_var.a, map_var.b].size() == 2",
test::IsCelBool(true)},
{"map_index", "map_var['b']", test::IsCelInt64(2)},
{"list_index", "[1, 2, 3][1]", test::IsCelInt64(2)},
{"compre_exists", "[1, 2, 3, 4].exists(x, x == 3)",
test::IsCelBool(true)},
{"compre_map", "8 in [1, 2, 3, 4].map(x, x * 2)",
test::IsCelBool(true)},
{"map_var_compre_exists", "map_var.exists(key, key == 'b')",
test::IsCelBool(true)},
{"map_compre_exists", "{'a': 1, 'b': 2}.exists(k, k == 'b')",
test::IsCelBool(true)},
{"create_map", "{'a': 42, 'b': 0, 'c': 0}.size()", test::IsCelInt64(3)},
{"create_struct",
"NestedTestAllTypes{payload: TestAllTypes{single_int64: "
"-42}}.payload.single_int64",
test::IsCelInt64(-42)},
{"bind", R"(cel.bind(x, "1", x + x + x + x))",
test::IsCelString("1111")},
{"nested_bind", R"(cel.bind(x, 20, cel.bind(y, 30, x + y)))",
test::IsCelInt64(50)},
{"bind_with_comprehensions",
R"(cel.bind(x, [1, 2], cel.bind(y, x.map(z, z * 2), y.exists(z, z == 4))))",
test::IsCelBool(true)},
{"shadowable_value_default", R"(TestEnum.FOO == 1)",
test::IsCelBool(true)},
{"shadowable_value_shadowed", R"(TestEnum.BAR == -1)",
test::IsCelBool(true)},
{"lazily_resolved_function", "LazilyBoundMult(123, 2) == 246",
test::IsCelBool(true)},
{"re_matches", "matches(string_abc, '[ad][be][cf]')",
test::IsCelBool(true)},
{"re_matches_receiver",
"(string_abc + string_def).matches(r'(123)?' + r'abc' + r'def')",
test::IsCelBool(true)},
}),
[](const testing::TestParamInfo<RecursiveTestCase>& info) -> std::string {
return info.param.test_name;
});
TEST(CelExpressionBuilderFlatImplTest, ParsedExprWithWarnings) {
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, Parse("1 + 2"));
cel::RuntimeOptions options;
options.fail_on_warnings = false;
CelExpressionBuilderFlatImpl builder(options);
std::vector<absl::Status> warnings;
ASSERT_OK_AND_ASSIGN(
std::unique_ptr<CelExpression> plan,
builder.CreateExpression(&parsed_expr.expr(), &parsed_expr.source_info(),
&warnings));
EXPECT_THAT(warnings, Contains(StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("No overloads"))));
Activation activation;
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result, plan->Evaluate(activation, &arena));
EXPECT_THAT(result, test::IsCelError(
StatusIs(_, HasSubstr("No matching overloads"))));
}
TEST(CelExpressionBuilderFlatImplTest, CheckedExpr) {
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, Parse("1 + 2"));
CheckedExpr checked_expr;
checked_expr.mutable_expr()->Swap(parsed_expr.mutable_expr());
checked_expr.mutable_source_info()->Swap(parsed_expr.mutable_source_info());
CelExpressionBuilderFlatImpl builder;
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelExpression> plan,
builder.CreateExpression(&checked_expr));
Activation activation;
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result, plan->Evaluate(activation, &arena));
EXPECT_THAT(result, test::IsCelInt64(3));
}
TEST(CelExpressionBuilderFlatImplTest, CheckedExprWithWarnings) {
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, Parse("1 + 2"));
CheckedExpr checked_expr;
checked_expr.mutable_expr()->Swap(parsed_expr.mutable_expr());
checked_expr.mutable_source_info()->Swap(parsed_expr.mutable_source_info());
cel::RuntimeOptions options;
options.fail_on_warnings = false;
CelExpressionBuilderFlatImpl builder(options);
std::vector<absl::Status> warnings;
ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelExpression> plan,
builder.CreateExpression(&checked_expr, &warnings));
EXPECT_THAT(warnings, Contains(StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("No overloads"))));
Activation activation;
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result, plan->Evaluate(activation, &arena));
EXPECT_THAT(result, test::IsCelError(
StatusIs(_, HasSubstr("No matching overloads"))));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/compiler/cel_expression_builder_flat_impl.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/compiler/cel_expression_builder_flat_impl_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
adc22d8c-272f-464f-8084-ea7c051b14e6 | cpp | google/cel-cpp | message_equality | internal/message_equality.cc | internal/message_equality_test.cc | #include "internal/message_equality.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <string>
#include <type_traits>
#include "absl/base/attributes.h"
#include "absl/base/nullability.h"
#include "absl/base/optimization.h"
#include "absl/functional/overload.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "absl/types/variant.h"
#include "common/memory.h"
#include "extensions/protobuf/internal/map_reflection.h"
#include "internal/json.h"
#include "internal/number.h"
#include "internal/status_macros.h"
#include "internal/well_known_types.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
#include "google/protobuf/util/message_differencer.h"
namespace cel::internal {
namespace {
using ::cel::extensions::protobuf_internal::LookupMapValue;
using ::cel::extensions::protobuf_internal::MapBegin;
using ::cel::extensions::protobuf_internal::MapEnd;
using ::cel::extensions::protobuf_internal::MapSize;
using ::google::protobuf::Descriptor;
using ::google::protobuf::DescriptorPool;
using ::google::protobuf::FieldDescriptor;
using ::google::protobuf::Message;
using ::google::protobuf::MessageFactory;
using ::google::protobuf::util::MessageDifferencer;
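// Thin reference wrappers so that list values, structs, Any, and arbitrary
// messages occupy distinct alternatives in the EquatableValue variant below.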
class EquatableListValue final
: public std::reference_wrapper<const google::protobuf::Message> {
public:
using std::reference_wrapper<const google::protobuf::Message>::reference_wrapper;
};
class EquatableStruct final
: public std::reference_wrapper<const google::protobuf::Message> {
public:
using std::reference_wrapper<const google::protobuf::Message>::reference_wrapper;
};
class EquatableAny final
: public std::reference_wrapper<const google::protobuf::Message> {
public:
using std::reference_wrapper<const google::protobuf::Message>::reference_wrapper;
};
class EquatableMessage final
: public std::reference_wrapper<const google::protobuf::Message> {
public:
using std::reference_wrapper<const google::protobuf::Message>::reference_wrapper;
};
using EquatableValue =
absl::variant<std::nullptr_t, bool, int64_t, uint64_t, double,
well_known_types::BytesValue, well_known_types::StringValue,
absl::Duration, absl::Time, EquatableListValue,
EquatableStruct, EquatableAny, EquatableMessage>;
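// One equaler per left-hand alternative: each struct compares its type against
// the matching right-hand alternatives (including cross-type numeric
// comparisons via internal::Number) and rejects everything else.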
struct NullValueEqualer {
bool operator()(std::nullptr_t, std::nullptr_t) const { return true; }
template <typename T>
std::enable_if_t<std::negation_v<std::is_same<std::nullptr_t, T>>, bool>
operator()(std::nullptr_t, const T&) const {
return false;
}
};
struct BoolValueEqualer {
bool operator()(bool lhs, bool rhs) const { return lhs == rhs; }
template <typename T>
std::enable_if_t<std::negation_v<std::is_same<bool, T>>, bool> operator()(
bool, const T&) const {
return false;
}
};
struct BytesValueEqualer {
bool operator()(const well_known_types::BytesValue& lhs,
const well_known_types::BytesValue& rhs) const {
return lhs == rhs;
}
template <typename T>
std::enable_if_t<
std::negation_v<std::is_same<well_known_types::BytesValue, T>>, bool>
operator()(const well_known_types::BytesValue&, const T&) const {
return false;
}
};
struct IntValueEqualer {
bool operator()(int64_t lhs, int64_t rhs) const { return lhs == rhs; }
bool operator()(int64_t lhs, uint64_t rhs) const {
return Number::FromInt64(lhs) == Number::FromUint64(rhs);
}
bool operator()(int64_t lhs, double rhs) const {
return Number::FromInt64(lhs) == Number::FromDouble(rhs);
}
template <typename T>
std::enable_if_t<std::conjunction_v<std::negation<std::is_same<int64_t, T>>,
std::negation<std::is_same<uint64_t, T>>,
std::negation<std::is_same<double, T>>>,
bool>
operator()(int64_t, const T&) const {
return false;
}
};
struct UintValueEqualer {
bool operator()(uint64_t lhs, int64_t rhs) const {
return Number::FromUint64(lhs) == Number::FromInt64(rhs);
}
bool operator()(uint64_t lhs, uint64_t rhs) const { return lhs == rhs; }
bool operator()(uint64_t lhs, double rhs) const {
return Number::FromUint64(lhs) == Number::FromDouble(rhs);
}
template <typename T>
std::enable_if_t<std::conjunction_v<std::negation<std::is_same<int64_t, T>>,
std::negation<std::is_same<uint64_t, T>>,
std::negation<std::is_same<double, T>>>,
bool>
operator()(uint64_t, const T&) const {
return false;
}
};
struct DoubleValueEqualer {
bool operator()(double lhs, int64_t rhs) const {
return Number::FromDouble(lhs) == Number::FromInt64(rhs);
}
bool operator()(double lhs, uint64_t rhs) const {
return Number::FromDouble(lhs) == Number::FromUint64(rhs);
}
bool operator()(double lhs, double rhs) const { return lhs == rhs; }
template <typename T>
std::enable_if_t<std::conjunction_v<std::negation<std::is_same<int64_t, T>>,
std::negation<std::is_same<uint64_t, T>>,
std::negation<std::is_same<double, T>>>,
bool>
operator()(double, const T&) const {
return false;
}
};
struct StringValueEqualer {
bool operator()(const well_known_types::StringValue& lhs,
const well_known_types::StringValue& rhs) const {
return lhs == rhs;
}
template <typename T>
std::enable_if_t<
std::negation_v<std::is_same<well_known_types::StringValue, T>>, bool>
operator()(const well_known_types::StringValue&, const T&) const {
return false;
}
};
struct DurationEqualer {
bool operator()(absl::Duration lhs, absl::Duration rhs) const {
return lhs == rhs;
}
template <typename T>
std::enable_if_t<std::negation_v<std::is_same<absl::Duration, T>>, bool>
operator()(absl::Duration, const T&) const {
return false;
}
};
struct TimestampEqualer {
bool operator()(absl::Time lhs, absl::Time rhs) const { return lhs == rhs; }
template <typename T>
std::enable_if_t<std::negation_v<std::is_same<absl::Time, T>>, bool>
operator()(absl::Time, const T&) const {
return false;
}
};
struct ListValueEqualer {
bool operator()(EquatableListValue lhs, EquatableListValue rhs) const {
return JsonListEquals(lhs, rhs);
}
template <typename T>
std::enable_if_t<std::negation_v<std::is_same<EquatableListValue, T>>, bool>
operator()(EquatableListValue, const T&) const {
return false;
}
};
struct StructEqualer {
bool operator()(EquatableStruct lhs, EquatableStruct rhs) const {
return JsonMapEquals(lhs, rhs);
}
template <typename T>
std::enable_if_t<std::negation_v<std::is_same<EquatableStruct, T>>, bool>
operator()(EquatableStruct, const T&) const {
return false;
}
};
struct AnyEqualer {
bool operator()(EquatableAny lhs, EquatableAny rhs) const {
auto lhs_reflection =
well_known_types::GetAnyReflectionOrDie(lhs.get().GetDescriptor());
std::string lhs_type_url_scratch;
std::string lhs_value_scratch;
auto rhs_reflection =
well_known_types::GetAnyReflectionOrDie(rhs.get().GetDescriptor());
std::string rhs_type_url_scratch;
std::string rhs_value_scratch;
return lhs_reflection.GetTypeUrl(lhs.get(), lhs_type_url_scratch) ==
rhs_reflection.GetTypeUrl(rhs.get(), rhs_type_url_scratch) &&
lhs_reflection.GetValue(lhs.get(), lhs_value_scratch) ==
rhs_reflection.GetValue(rhs.get(), rhs_value_scratch);
}
template <typename T>
std::enable_if_t<std::negation_v<std::is_same<EquatableAny, T>>, bool>
operator()(EquatableAny, const T&) const {
return false;
}
};
struct MessageEqualer {
bool operator()(EquatableMessage lhs, EquatableMessage rhs) const {
return lhs.get().GetDescriptor() == rhs.get().GetDescriptor() &&
MessageDifferencer::Equals(lhs.get(), rhs.get());
}
template <typename T>
std::enable_if_t<std::negation_v<std::is_same<EquatableMessage, T>>, bool>
operator()(EquatableMessage, const T&) const {
return false;
}
};
struct EquatableValueReflection final {
well_known_types::DoubleValueReflection double_value_reflection;
well_known_types::FloatValueReflection float_value_reflection;
well_known_types::Int64ValueReflection int64_value_reflection;
well_known_types::UInt64ValueReflection uint64_value_reflection;
well_known_types::Int32ValueReflection int32_value_reflection;
well_known_types::UInt32ValueReflection uint32_value_reflection;
well_known_types::StringValueReflection string_value_reflection;
well_known_types::BytesValueReflection bytes_value_reflection;
well_known_types::BoolValueReflection bool_value_reflection;
well_known_types::AnyReflection any_reflection;
well_known_types::DurationReflection duration_reflection;
well_known_types::TimestampReflection timestamp_reflection;
well_known_types::ValueReflection value_reflection;
well_known_types::ListValueReflection list_value_reflection;
well_known_types::StructReflection struct_reflection;
};
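// Projects a message of a well-known type onto the EquatableValue variant,
// unwrapping wrapper types to their primitive payloads; messages that are not
// well-known types stay opaque EquatableMessages.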
absl::StatusOr<EquatableValue> AsEquatableValue(
EquatableValueReflection& reflection,
const Message& message ABSL_ATTRIBUTE_LIFETIME_BOUND,
absl::Nonnull<const Descriptor*> descriptor,
Descriptor::WellKnownType well_known_type,
std::string& scratch ABSL_ATTRIBUTE_LIFETIME_BOUND) {
switch (well_known_type) {
case Descriptor::WELLKNOWNTYPE_DOUBLEVALUE:
CEL_RETURN_IF_ERROR(
reflection.double_value_reflection.Initialize(descriptor));
return reflection.double_value_reflection.GetValue(message);
case Descriptor::WELLKNOWNTYPE_FLOATVALUE:
CEL_RETURN_IF_ERROR(
reflection.float_value_reflection.Initialize(descriptor));
return static_cast<double>(
reflection.float_value_reflection.GetValue(message));
case Descriptor::WELLKNOWNTYPE_INT64VALUE:
CEL_RETURN_IF_ERROR(
reflection.int64_value_reflection.Initialize(descriptor));
return reflection.int64_value_reflection.GetValue(message);
case Descriptor::WELLKNOWNTYPE_UINT64VALUE:
CEL_RETURN_IF_ERROR(
reflection.uint64_value_reflection.Initialize(descriptor));
return reflection.uint64_value_reflection.GetValue(message);
case Descriptor::WELLKNOWNTYPE_INT32VALUE:
CEL_RETURN_IF_ERROR(
reflection.int32_value_reflection.Initialize(descriptor));
return static_cast<int64_t>(
reflection.int32_value_reflection.GetValue(message));
case Descriptor::WELLKNOWNTYPE_UINT32VALUE:
CEL_RETURN_IF_ERROR(
reflection.uint32_value_reflection.Initialize(descriptor));
return static_cast<uint64_t>(
reflection.uint32_value_reflection.GetValue(message));
case Descriptor::WELLKNOWNTYPE_STRINGVALUE:
CEL_RETURN_IF_ERROR(
reflection.string_value_reflection.Initialize(descriptor));
return reflection.string_value_reflection.GetValue(message, scratch);
case Descriptor::WELLKNOWNTYPE_BYTESVALUE:
CEL_RETURN_IF_ERROR(
reflection.bytes_value_reflection.Initialize(descriptor));
return reflection.bytes_value_reflection.GetValue(message, scratch);
case Descriptor::WELLKNOWNTYPE_BOOLVALUE:
CEL_RETURN_IF_ERROR(
reflection.bool_value_reflection.Initialize(descriptor));
return reflection.bool_value_reflection.GetValue(message);
case Descriptor::WELLKNOWNTYPE_VALUE: {
CEL_RETURN_IF_ERROR(reflection.value_reflection.Initialize(descriptor));
const auto kind_case = reflection.value_reflection.GetKindCase(message);
switch (kind_case) {
case google::protobuf::Value::KIND_NOT_SET:
ABSL_FALLTHROUGH_INTENDED;
case google::protobuf::Value::kNullValue:
return nullptr;
case google::protobuf::Value::kBoolValue:
return reflection.value_reflection.GetBoolValue(message);
case google::protobuf::Value::kNumberValue:
return reflection.value_reflection.GetNumberValue(message);
case google::protobuf::Value::kStringValue:
return reflection.value_reflection.GetStringValue(message, scratch);
case google::protobuf::Value::kListValue:
return EquatableListValue(
reflection.value_reflection.GetListValue(message));
case google::protobuf::Value::kStructValue:
return EquatableStruct(
reflection.value_reflection.GetStructValue(message));
default:
return absl::InternalError(
absl::StrCat("unexpected value kind case: ", kind_case));
}
}
case Descriptor::WELLKNOWNTYPE_LISTVALUE:
return EquatableListValue(message);
case Descriptor::WELLKNOWNTYPE_STRUCT:
return EquatableStruct(message);
case Descriptor::WELLKNOWNTYPE_DURATION:
CEL_RETURN_IF_ERROR(
reflection.duration_reflection.Initialize(descriptor));
return reflection.duration_reflection.ToAbslDuration(message);
case Descriptor::WELLKNOWNTYPE_TIMESTAMP:
CEL_RETURN_IF_ERROR(
reflection.timestamp_reflection.Initialize(descriptor));
return reflection.timestamp_reflection.ToAbslTime(message);
case Descriptor::WELLKNOWNTYPE_ANY:
return EquatableAny(message);
default:
return EquatableMessage(message);
}
}
absl::StatusOr<EquatableValue> AsEquatableValue(
EquatableValueReflection& reflection,
const Message& message ABSL_ATTRIBUTE_LIFETIME_BOUND,
absl::Nonnull<const Descriptor*> descriptor,
std::string& scratch ABSL_ATTRIBUTE_LIFETIME_BOUND) {
return AsEquatableValue(reflection, message, descriptor,
descriptor->well_known_type(), scratch);
}
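// Converts a singular message field to an EquatableValue, widening 32-bit
// integrals and floats and mapping google.protobuf.NullValue enums to null.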
absl::StatusOr<EquatableValue> AsEquatableValue(
EquatableValueReflection& reflection,
const Message& message ABSL_ATTRIBUTE_LIFETIME_BOUND,
absl::Nonnull<const FieldDescriptor*> field,
std::string& scratch ABSL_ATTRIBUTE_LIFETIME_BOUND) {
ABSL_DCHECK(!field->is_repeated() && !field->is_map());
switch (field->cpp_type()) {
case FieldDescriptor::CPPTYPE_INT32:
return static_cast<int64_t>(
message.GetReflection()->GetInt32(message, field));
case FieldDescriptor::CPPTYPE_INT64:
return message.GetReflection()->GetInt64(message, field);
case FieldDescriptor::CPPTYPE_UINT32:
return static_cast<uint64_t>(
message.GetReflection()->GetUInt32(message, field));
case FieldDescriptor::CPPTYPE_UINT64:
return message.GetReflection()->GetUInt64(message, field);
case FieldDescriptor::CPPTYPE_DOUBLE:
return message.GetReflection()->GetDouble(message, field);
case FieldDescriptor::CPPTYPE_FLOAT:
return static_cast<double>(
message.GetReflection()->GetFloat(message, field));
case FieldDescriptor::CPPTYPE_BOOL:
return message.GetReflection()->GetBool(message, field);
case FieldDescriptor::CPPTYPE_ENUM:
if (field->enum_type()->full_name() == "google.protobuf.NullValue") {
return nullptr;
}
return static_cast<int64_t>(
message.GetReflection()->GetEnumValue(message, field));
case FieldDescriptor::CPPTYPE_STRING:
if (field->type() == FieldDescriptor::TYPE_BYTES) {
return well_known_types::GetBytesField(message, field, scratch);
}
return well_known_types::GetStringField(message, field, scratch);
case FieldDescriptor::CPPTYPE_MESSAGE:
return AsEquatableValue(
reflection, message.GetReflection()->GetMessage(message, field),
field->message_type(), scratch);
default:
return absl::InternalError(
absl::StrCat("unexpected field type: ", field->cpp_type_name()));
}
}
bool IsAny(const Message& message) {
return message.GetDescriptor()->well_known_type() ==
Descriptor::WELLKNOWNTYPE_ANY;
}
bool IsAnyField(absl::Nonnull<const FieldDescriptor*> field) {
return field->type() == FieldDescriptor::TYPE_MESSAGE &&
field->message_type()->well_known_type() ==
Descriptor::WELLKNOWNTYPE_ANY;
}
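// Converts a protobuf map value to an EquatableValue, transparently unpacking
// google.protobuf.Any payloads when the type is resolvable in the given pool.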
absl::StatusOr<EquatableValue> MapValueAsEquatableValue(
absl::Nonnull<google::protobuf::Arena*> arena,
absl::Nonnull<const DescriptorPool*> pool,
absl::Nonnull<MessageFactory*> factory,
EquatableValueReflection& reflection, const google::protobuf::MapValueConstRef& value,
absl::Nonnull<const FieldDescriptor*> field, std::string& scratch,
Unique<Message>& unpacked) {
if (IsAnyField(field)) {
CEL_ASSIGN_OR_RETURN(unpacked, well_known_types::UnpackAnyIfResolveable(
arena, reflection.any_reflection,
value.GetMessageValue(), pool, factory));
if (unpacked) {
return AsEquatableValue(reflection, *unpacked, unpacked->GetDescriptor(),
scratch);
}
return AsEquatableValue(reflection, value.GetMessageValue(),
value.GetMessageValue().GetDescriptor(), scratch);
}
switch (field->cpp_type()) {
case FieldDescriptor::CPPTYPE_INT32:
return static_cast<int64_t>(value.GetInt32Value());
case FieldDescriptor::CPPTYPE_INT64:
return value.GetInt64Value();
case FieldDescriptor::CPPTYPE_UINT32:
return static_cast<uint64_t>(value.GetUInt32Value());
case FieldDescriptor::CPPTYPE_UINT64:
return value.GetUInt64Value();
case FieldDescriptor::CPPTYPE_DOUBLE:
return value.GetDoubleValue();
case FieldDescriptor::CPPTYPE_FLOAT:
return static_cast<double>(value.GetFloatValue());
case FieldDescriptor::CPPTYPE_BOOL:
return value.GetBoolValue();
case FieldDescriptor::CPPTYPE_ENUM:
if (field->enum_type()->full_name() == "google.protobuf.NullValue") {
return nullptr;
}
return static_cast<int64_t>(value.GetEnumValue());
case FieldDescriptor::CPPTYPE_STRING:
if (field->type() == FieldDescriptor::TYPE_BYTES) {
return well_known_types::BytesValue(
absl::string_view(value.GetStringValue()));
}
return well_known_types::StringValue(
absl::string_view(value.GetStringValue()));
case FieldDescriptor::CPPTYPE_MESSAGE: {
const auto& message = value.GetMessageValue();
return AsEquatableValue(reflection, message, message.GetDescriptor(),
scratch);
}
default:
return absl::InternalError(
absl::StrCat("unexpected field type: ", field->cpp_type_name()));
}
}
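// Converts element `index` of a repeated field into an EquatableValue,
// unpacking resolvable Any elements, mirroring the map-value conversion
// above.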
absl::StatusOr<EquatableValue> RepeatedFieldAsEquatableValue(
absl::Nonnull<google::protobuf::Arena*> arena,
absl::Nonnull<const DescriptorPool*> pool,
absl::Nonnull<MessageFactory*> factory,
EquatableValueReflection& reflection, const Message& message,
absl::Nonnull<const FieldDescriptor*> field, int index,
std::string& scratch, Unique<Message>& unpacked) {
if (IsAnyField(field)) {
const auto& field_value =
message.GetReflection()->GetRepeatedMessage(message, field, index);
CEL_ASSIGN_OR_RETURN(unpacked, well_known_types::UnpackAnyIfResolveable(
arena, reflection.any_reflection,
field_value, pool, factory));
if (unpacked) {
return AsEquatableValue(reflection, *unpacked, unpacked->GetDescriptor(),
scratch);
}
return AsEquatableValue(reflection, field_value,
field_value.GetDescriptor(), scratch);
}
switch (field->cpp_type()) {
case FieldDescriptor::CPPTYPE_INT32:
return static_cast<int64_t>(
message.GetReflection()->GetRepeatedInt32(message, field, index));
case FieldDescriptor::CPPTYPE_INT64:
return message.GetReflection()->GetRepeatedInt64(message, field, index);
case FieldDescriptor::CPPTYPE_UINT32:
return static_cast<uint64_t>(
message.GetReflection()->GetRepeatedUInt32(message, field, index));
case FieldDescriptor::CPPTYPE_UINT64:
return message.GetReflection()->GetRepeatedUInt64(message, field, index);
case FieldDescriptor::CPPTYPE_DOUBLE:
return message.GetReflection()->GetRepeatedDouble(message, field, index);
case FieldDescriptor::CPPTYPE_FLOAT:
return static_cast<double>(
message.GetReflection()->GetRepeatedFloat(message, field, index));
case FieldDescriptor::CPPTYPE_BOOL:
return message.GetReflection()->GetRepeatedBool(message, field, index);
case FieldDescriptor::CPPTYPE_ENUM:
if (field->enum_type()->full_name() == "google.protobuf.NullValue") {
return nullptr;
}
return static_cast<int64_t>(
message.GetReflection()->GetRepeatedEnumValue(message, field, index));
case FieldDescriptor::CPPTYPE_STRING:
if (field->type() == FieldDescriptor::TYPE_BYTES) {
return well_known_types::GetRepeatedBytesField(message, field, index,
scratch);
}
return well_known_types::GetRepeatedStringField(message, field, index,
scratch);
case FieldDescriptor::CPPTYPE_MESSAGE: {
const auto& submessage =
message.GetReflection()->GetRepeatedMessage(message, field, index);
return AsEquatableValue(reflection, submessage,
submessage.GetDescriptor(), scratch);
}
default:
return absl::InternalError(
absl::StrCat("unexpected field type: ", field->cpp_type_name()));
}
}
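// Dispatches to the pairwise *Equaler visitors, which implement equality
// across the heterogeneous EquatableValue alternatives.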
bool EquatableValueEquals(const EquatableValue& lhs,
const EquatableValue& rhs) {
return absl::visit(
absl::Overload(NullValueEqualer{}, BoolValueEqualer{},
BytesValueEqualer{}, IntValueEqualer{}, UintValueEqualer{},
DoubleValueEqualer{}, StringValueEqualer{},
DurationEqualer{}, TimestampEqualer{}, ListValueEqualer{},
StructEqualer{}, AnyEqualer{}, MessageEqualer{}),
lhs, rhs);
}
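// Attempts to losslessly convert a map key to `dest_type` so maps keyed on
// different integral widths can still be compared; returns false when the
// value does not fit, e.g. an int64 key of 1 coalesces to uint32 but -1
// does not.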
bool CoalesceMapKey(const google::protobuf::MapKey& src,
FieldDescriptor::CppType dest_type,
absl::Nonnull<google::protobuf::MapKey*> dest) {
switch (src.type()) {
case FieldDescriptor::CPPTYPE_BOOL:
if (dest_type != FieldDescriptor::CPPTYPE_BOOL) {
return false;
}
dest->SetBoolValue(src.GetBoolValue());
return true;
case FieldDescriptor::CPPTYPE_INT32: {
const auto src_value = src.GetInt32Value();
switch (dest_type) {
case FieldDescriptor::CPPTYPE_INT32:
dest->SetInt32Value(src_value);
return true;
case FieldDescriptor::CPPTYPE_INT64:
dest->SetInt64Value(src_value);
return true;
case FieldDescriptor::CPPTYPE_UINT32:
if (src_value < 0) {
return false;
}
dest->SetUInt32Value(static_cast<uint32_t>(src_value));
return true;
case FieldDescriptor::CPPTYPE_UINT64:
if (src_value < 0) {
return false;
}
dest->SetUInt64Value(static_cast<uint64_t>(src_value));
return true;
default:
return false;
}
}
case FieldDescriptor::CPPTYPE_INT64: {
const auto src_value = src.GetInt64Value();
switch (dest_type) {
case FieldDescriptor::CPPTYPE_INT32:
if (src_value < std::numeric_limits<int32_t>::min() ||
src_value > std::numeric_limits<int32_t>::max()) {
return false;
}
dest->SetInt32Value(static_cast<int32_t>(src_value));
return true;
case FieldDescriptor::CPPTYPE_INT64:
dest->SetInt64Value(src_value);
return true;
case FieldDescriptor::CPPTYPE_UINT32:
if (src_value < 0 ||
src_value > std::numeric_limits<uint32_t>::max()) {
return false;
}
dest->SetUInt32Value(static_cast<uint32_t>(src_value));
return true;
case FieldDescriptor::CPPTYPE_UINT64:
if (src_value < 0) {
return false;
}
dest->SetUInt64Value(static_cast<uint64_t>(src_value));
return true;
default:
return false;
}
}
case FieldDescriptor::CPPTYPE_UINT32: {
const auto src_value = src.GetUInt32Value();
switch (dest_type) {
case FieldDescriptor::CPPTYPE_INT32:
if (src_value > std::numeric_limits<int32_t>::max()) {
return false;
}
dest->SetInt32Value(static_cast<int32_t>(src_value));
return true;
case FieldDescriptor::CPPTYPE_INT64:
dest->SetInt64Value(static_cast<int64_t>(src_value));
return true;
case FieldDescriptor::CPPTYPE_UINT32:
dest->SetUInt32Value(src_value);
return true;
case FieldDescriptor::CPPTYPE_UINT64:
dest->SetUInt64Value(static_cast<uint64_t>(src_value));
return true;
default:
return false;
}
}
case FieldDescriptor::CPPTYPE_UINT64: {
const auto src_value = src.GetUInt64Value();
switch (dest_type) {
case FieldDescriptor::CPPTYPE_INT32:
if (src_value > std::numeric_limits<int32_t>::max()) {
return false;
}
dest->SetInt32Value(static_cast<int32_t>(src_value));
return true;
case FieldDescriptor::CPPTYPE_INT64:
if (src_value > std::numeric_limits<int64_t>::max()) {
return false;
}
dest->SetInt64Value(static_cast<int64_t>(src_value));
return true;
case FieldDescriptor::CPPTYPE_UINT32:
if (src_value > std::numeric_limits<uint32_t>::max()) {
return false;
}
dest->SetUInt32Value(src_value);
return true;
case FieldDescriptor::CPPTYPE_UINT64:
dest->SetUInt64Value(src_value);
return true;
default:
return false;
}
}
case FieldDescriptor::CPPTYPE_STRING:
if (dest_type != FieldDescriptor::CPPTYPE_STRING) {
return false;
}
dest->SetStringValue(src.GetStringValue());
return true;
default:
ABSL_UNREACHABLE();
}
}
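// Bitmask of coarse value categories. Two fields can only compare equal when
// their categories intersect, which lets the comparison bail out early
// without materializing values.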
enum class EquatableCategory {
kNone = 0,
kNullLike = 1 << 0,
kBoolLike = 1 << 1,
kNumericLike = 1 << 2,
kBytesLike = 1 << 3,
kStringLike = 1 << 4,
kList = 1 << 5,
kMap = 1 << 6,
kMessage = 1 << 7,
kDuration = 1 << 8,
kTimestamp = 1 << 9,
kAny = kNullLike | kBoolLike | kNumericLike | kBytesLike | kStringLike |
kList | kMap | kMessage | kDuration | kTimestamp,
kValue = kNullLike | kBoolLike | kNumericLike | kStringLike | kList | kMap,
};
constexpr EquatableCategory operator&(EquatableCategory lhs,
EquatableCategory rhs) {
return static_cast<EquatableCategory>(
static_cast<std::underlying_type_t<EquatableCategory>>(lhs) &
static_cast<std::underlying_type_t<EquatableCategory>>(rhs));
}
constexpr bool operator==(EquatableCategory lhs, EquatableCategory rhs) {
return static_cast<std::underlying_type_t<EquatableCategory>>(lhs) ==
static_cast<std::underlying_type_t<EquatableCategory>>(rhs);
}
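// Maps well-known wrapper, struct, and list types to a category; other
// messages conservatively report kAny so they are never pruned.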
EquatableCategory GetEquatableCategory(
absl::Nonnull<const Descriptor*> descriptor) {
switch (descriptor->well_known_type()) {
case Descriptor::WELLKNOWNTYPE_BOOLVALUE:
return EquatableCategory::kBoolLike;
case Descriptor::WELLKNOWNTYPE_FLOATVALUE:
ABSL_FALLTHROUGH_INTENDED;
case Descriptor::WELLKNOWNTYPE_DOUBLEVALUE:
ABSL_FALLTHROUGH_INTENDED;
case Descriptor::WELLKNOWNTYPE_INT32VALUE:
ABSL_FALLTHROUGH_INTENDED;
case Descriptor::WELLKNOWNTYPE_UINT32VALUE:
ABSL_FALLTHROUGH_INTENDED;
case Descriptor::WELLKNOWNTYPE_INT64VALUE:
ABSL_FALLTHROUGH_INTENDED;
case Descriptor::WELLKNOWNTYPE_UINT64VALUE:
return EquatableCategory::kNumericLike;
case Descriptor::WELLKNOWNTYPE_BYTESVALUE:
return EquatableCategory::kBytesLike;
case Descriptor::WELLKNOWNTYPE_STRINGVALUE:
return EquatableCategory::kStringLike;
case Descriptor::WELLKNOWNTYPE_VALUE:
return EquatableCategory::kValue;
case Descriptor::WELLKNOWNTYPE_LISTVALUE:
return EquatableCategory::kList;
case Descriptor::WELLKNOWNTYPE_STRUCT:
return EquatableCategory::kMap;
case Descriptor::WELLKNOWNTYPE_ANY:
return EquatableCategory::kAny;
case Descriptor::WELLKNOWNTYPE_DURATION:
return EquatableCategory::kDuration;
case Descriptor::WELLKNOWNTYPE_TIMESTAMP:
return EquatableCategory::kTimestamp;
default:
return EquatableCategory::kAny;
}
}
EquatableCategory GetEquatableFieldCategory(
absl::Nonnull<const FieldDescriptor*> field) {
switch (field->cpp_type()) {
case FieldDescriptor::CPPTYPE_ENUM:
return field->enum_type()->full_name() == "google.protobuf.NullValue"
? EquatableCategory::kNullLike
: EquatableCategory::kNumericLike;
case FieldDescriptor::CPPTYPE_BOOL:
return EquatableCategory::kBoolLike;
case FieldDescriptor::CPPTYPE_FLOAT:
ABSL_FALLTHROUGH_INTENDED;
case FieldDescriptor::CPPTYPE_DOUBLE:
ABSL_FALLTHROUGH_INTENDED;
case FieldDescriptor::CPPTYPE_INT32:
ABSL_FALLTHROUGH_INTENDED;
case FieldDescriptor::CPPTYPE_UINT32:
ABSL_FALLTHROUGH_INTENDED;
case FieldDescriptor::CPPTYPE_INT64:
ABSL_FALLTHROUGH_INTENDED;
case FieldDescriptor::CPPTYPE_UINT64:
return EquatableCategory::kNumericLike;
case FieldDescriptor::CPPTYPE_STRING:
return field->type() == FieldDescriptor::TYPE_BYTES
? EquatableCategory::kBytesLike
: EquatableCategory::kStringLike;
case FieldDescriptor::CPPTYPE_MESSAGE:
return GetEquatableCategory(field->message_type());
default:
return EquatableCategory::kAny;
}
}
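// Stateful comparer owning the arena, cached well-known-type reflection, and
// scratch strings reused across recursive comparisons.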
class MessageEqualsState final {
public:
MessageEqualsState(absl::Nonnull<const DescriptorPool*> pool,
absl::Nonnull<MessageFactory*> factory)
: pool_(pool), factory_(factory) {}
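  // Compares two messages as values, unpacking either side first when it is
  // a google.protobuf.Any that the pool can resolve.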
absl::StatusOr<bool> Equals(const Message& lhs, const Message& rhs) {
const auto* lhs_descriptor = lhs.GetDescriptor();
const auto* rhs_descriptor = rhs.GetDescriptor();
auto lhs_well_known_type = lhs_descriptor->well_known_type();
auto rhs_well_known_type = rhs_descriptor->well_known_type();
absl::Nonnull<const Message*> lhs_ptr = &lhs;
absl::Nonnull<const Message*> rhs_ptr = &rhs;
Unique<Message> lhs_unpacked;
Unique<Message> rhs_unpacked;
if (lhs_well_known_type == Descriptor::WELLKNOWNTYPE_ANY) {
CEL_ASSIGN_OR_RETURN(
lhs_unpacked,
well_known_types::UnpackAnyIfResolveable(
&arena_, lhs_reflection_.any_reflection, lhs, pool_, factory_));
if (lhs_unpacked) {
lhs_ptr = cel::to_address(lhs_unpacked);
lhs_descriptor = lhs_ptr->GetDescriptor();
lhs_well_known_type = lhs_descriptor->well_known_type();
}
}
if (rhs_well_known_type == Descriptor::WELLKNOWNTYPE_ANY) {
CEL_ASSIGN_OR_RETURN(
rhs_unpacked,
well_known_types::UnpackAnyIfResolveable(
&arena_, rhs_reflection_.any_reflection, rhs, pool_, factory_));
if (rhs_unpacked) {
rhs_ptr = cel::to_address(rhs_unpacked);
rhs_descriptor = rhs_ptr->GetDescriptor();
rhs_well_known_type = rhs_descriptor->well_known_type();
}
}
CEL_ASSIGN_OR_RETURN(
auto lhs_value,
AsEquatableValue(lhs_reflection_, *lhs_ptr, lhs_descriptor,
lhs_well_known_type, lhs_scratch_));
CEL_ASSIGN_OR_RETURN(
auto rhs_value,
AsEquatableValue(rhs_reflection_, *rhs_ptr, rhs_descriptor,
rhs_well_known_type, rhs_scratch_));
return EquatableValueEquals(lhs_value, rhs_value);
}
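  // Compares two map fields entry by entry: each lhs key is coalesced into
  // the rhs key type and looked up in rhs, then the values are compared as
  // EquatableValues.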
absl::StatusOr<bool> MapFieldEquals(
const Message& lhs, absl::Nonnull<const FieldDescriptor*> lhs_field,
const Message& rhs, absl::Nonnull<const FieldDescriptor*> rhs_field) {
ABSL_DCHECK(lhs_field->is_map());
ABSL_DCHECK_EQ(lhs_field->containing_type(), lhs.GetDescriptor());
ABSL_DCHECK(rhs_field->is_map());
ABSL_DCHECK_EQ(rhs_field->containing_type(), rhs.GetDescriptor());
const auto* lhs_entry = lhs_field->message_type();
const auto* lhs_entry_key_field = lhs_entry->map_key();
const auto* lhs_entry_value_field = lhs_entry->map_value();
const auto* rhs_entry = rhs_field->message_type();
const auto* rhs_entry_key_field = rhs_entry->map_key();
const auto* rhs_entry_value_field = rhs_entry->map_value();
if (lhs_field != rhs_field &&
((GetEquatableFieldCategory(lhs_entry_key_field) &
GetEquatableFieldCategory(rhs_entry_key_field)) ==
EquatableCategory::kNone ||
(GetEquatableFieldCategory(lhs_entry_value_field) &
GetEquatableFieldCategory(rhs_entry_value_field)) ==
EquatableCategory::kNone)) {
return false;
}
const auto* lhs_reflection = lhs.GetReflection();
const auto* rhs_reflection = rhs.GetReflection();
if (MapSize(*lhs_reflection, lhs, *lhs_field) !=
MapSize(*rhs_reflection, rhs, *rhs_field)) {
return false;
}
auto lhs_begin = MapBegin(*lhs_reflection, lhs, *lhs_field);
const auto lhs_end = MapEnd(*lhs_reflection, lhs, *lhs_field);
Unique<Message> lhs_unpacked;
EquatableValue lhs_value;
Unique<Message> rhs_unpacked;
EquatableValue rhs_value;
google::protobuf::MapKey rhs_map_key;
google::protobuf::MapValueConstRef rhs_map_value;
for (; lhs_begin != lhs_end; ++lhs_begin) {
if (!CoalesceMapKey(lhs_begin.GetKey(), rhs_entry_key_field->cpp_type(),
&rhs_map_key)) {
return false;
}
if (!LookupMapValue(*rhs_reflection, rhs, *rhs_field, rhs_map_key,
&rhs_map_value)) {
return false;
}
CEL_ASSIGN_OR_RETURN(lhs_value,
MapValueAsEquatableValue(
&arena_, pool_, factory_, lhs_reflection_,
lhs_begin.GetValueRef(), lhs_entry_value_field,
lhs_scratch_, lhs_unpacked));
CEL_ASSIGN_OR_RETURN(
rhs_value,
MapValueAsEquatableValue(&arena_, pool_, factory_, rhs_reflection_,
rhs_map_value, rhs_entry_value_field,
rhs_scratch_, rhs_unpacked));
if (!EquatableValueEquals(lhs_value, rhs_value)) {
return false;
}
}
return true;
}
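  // Compares two non-map repeated fields elementwise after checking sizes
  // and category compatibility.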
absl::StatusOr<bool> RepeatedFieldEquals(
const Message& lhs, absl::Nonnull<const FieldDescriptor*> lhs_field,
const Message& rhs, absl::Nonnull<const FieldDescriptor*> rhs_field) {
ABSL_DCHECK(lhs_field->is_repeated() && !lhs_field->is_map());
ABSL_DCHECK_EQ(lhs_field->containing_type(), lhs.GetDescriptor());
ABSL_DCHECK(rhs_field->is_repeated() && !rhs_field->is_map());
ABSL_DCHECK_EQ(rhs_field->containing_type(), rhs.GetDescriptor());
if (lhs_field != rhs_field &&
(GetEquatableFieldCategory(lhs_field) &
GetEquatableFieldCategory(rhs_field)) == EquatableCategory::kNone) {
return false;
}
const auto* lhs_reflection = lhs.GetReflection();
const auto* rhs_reflection = rhs.GetReflection();
const auto size = lhs_reflection->FieldSize(lhs, lhs_field);
if (size != rhs_reflection->FieldSize(rhs, rhs_field)) {
return false;
}
Unique<Message> lhs_unpacked;
EquatableValue lhs_value;
Unique<Message> rhs_unpacked;
EquatableValue rhs_value;
for (int i = 0; i < size; ++i) {
CEL_ASSIGN_OR_RETURN(lhs_value,
RepeatedFieldAsEquatableValue(
&arena_, pool_, factory_, lhs_reflection_, lhs,
lhs_field, i, lhs_scratch_, lhs_unpacked));
CEL_ASSIGN_OR_RETURN(rhs_value,
RepeatedFieldAsEquatableValue(
&arena_, pool_, factory_, rhs_reflection_, rhs,
rhs_field, i, rhs_scratch_, rhs_unpacked));
if (!EquatableValueEquals(lhs_value, rhs_value)) {
return false;
}
}
return true;
}
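  // Compares a singular field, or the whole message when the field
  // descriptor is null, against another; Any values on either side are
  // unpacked first.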
absl::StatusOr<bool> SingularFieldEquals(
const Message& lhs, absl::Nullable<const FieldDescriptor*> lhs_field,
const Message& rhs, absl::Nullable<const FieldDescriptor*> rhs_field) {
ABSL_DCHECK(lhs_field == nullptr ||
(!lhs_field->is_repeated() && !lhs_field->is_map()));
ABSL_DCHECK(lhs_field == nullptr ||
lhs_field->containing_type() == lhs.GetDescriptor());
ABSL_DCHECK(rhs_field == nullptr ||
(!rhs_field->is_repeated() && !rhs_field->is_map()));
ABSL_DCHECK(rhs_field == nullptr ||
rhs_field->containing_type() == rhs.GetDescriptor());
if (lhs_field != rhs_field &&
((lhs_field != nullptr ? GetEquatableFieldCategory(lhs_field)
: GetEquatableCategory(lhs.GetDescriptor())) &
(rhs_field != nullptr ? GetEquatableFieldCategory(rhs_field)
: GetEquatableCategory(rhs.GetDescriptor()))) ==
EquatableCategory::kNone) {
return false;
}
absl::Nonnull<const Message*> lhs_ptr = &lhs;
absl::Nonnull<const Message*> rhs_ptr = &rhs;
Unique<Message> lhs_unpacked;
Unique<Message> rhs_unpacked;
if (lhs_field != nullptr && IsAnyField(lhs_field)) {
CEL_ASSIGN_OR_RETURN(lhs_unpacked,
well_known_types::UnpackAnyIfResolveable(
&arena_, lhs_reflection_.any_reflection,
lhs.GetReflection()->GetMessage(lhs, lhs_field),
pool_, factory_));
if (lhs_unpacked) {
lhs_ptr = cel::to_address(lhs_unpacked);
lhs_field = nullptr;
}
} else if (lhs_field == nullptr && IsAny(lhs)) {
CEL_ASSIGN_OR_RETURN(
lhs_unpacked,
well_known_types::UnpackAnyIfResolveable(
&arena_, lhs_reflection_.any_reflection, lhs, pool_, factory_));
if (lhs_unpacked) {
lhs_ptr = cel::to_address(lhs_unpacked);
}
}
if (rhs_field != nullptr && IsAnyField(rhs_field)) {
CEL_ASSIGN_OR_RETURN(rhs_unpacked,
well_known_types::UnpackAnyIfResolveable(
&arena_, rhs_reflection_.any_reflection,
rhs.GetReflection()->GetMessage(rhs, rhs_field),
pool_, factory_));
if (rhs_unpacked) {
rhs_ptr = cel::to_address(rhs_unpacked);
rhs_field = nullptr;
}
} else if (rhs_field == nullptr && IsAny(rhs)) {
CEL_ASSIGN_OR_RETURN(
rhs_unpacked,
well_known_types::UnpackAnyIfResolveable(
&arena_, rhs_reflection_.any_reflection, rhs, pool_, factory_));
if (rhs_unpacked) {
rhs_ptr = cel::to_address(rhs_unpacked);
}
}
EquatableValue lhs_value;
if (lhs_field != nullptr) {
CEL_ASSIGN_OR_RETURN(
lhs_value,
AsEquatableValue(lhs_reflection_, *lhs_ptr, lhs_field, lhs_scratch_));
} else {
CEL_ASSIGN_OR_RETURN(
lhs_value, AsEquatableValue(lhs_reflection_, *lhs_ptr,
lhs_ptr->GetDescriptor(), lhs_scratch_));
}
EquatableValue rhs_value;
if (rhs_field != nullptr) {
CEL_ASSIGN_OR_RETURN(
rhs_value,
AsEquatableValue(rhs_reflection_, *rhs_ptr, rhs_field, rhs_scratch_));
} else {
CEL_ASSIGN_OR_RETURN(
rhs_value, AsEquatableValue(rhs_reflection_, *rhs_ptr,
rhs_ptr->GetDescriptor(), rhs_scratch_));
}
return EquatableValueEquals(lhs_value, rhs_value);
}
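  // Entry point for field-to-field comparison. Handles the asymmetric cases
  // where only one side is a map (matched against Struct, or Value holding a
  // struct) or only one side is repeated (matched against ListValue, or
  // Value holding a list), then falls back to SingularFieldEquals.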
absl::StatusOr<bool> FieldEquals(
const Message& lhs, absl::Nullable<const FieldDescriptor*> lhs_field,
const Message& rhs, absl::Nullable<const FieldDescriptor*> rhs_field) {
    ABSL_DCHECK(lhs_field != nullptr || rhs_field != nullptr);
if (lhs_field != nullptr && lhs_field->is_map()) {
if (rhs_field != nullptr && rhs_field->is_map()) {
return MapFieldEquals(lhs, lhs_field, rhs, rhs_field);
}
if (rhs_field != nullptr &&
(rhs_field->is_repeated() ||
rhs_field->type() != FieldDescriptor::TYPE_MESSAGE)) {
return false;
}
absl::Nullable<const Message*> rhs_packed = nullptr;
Unique<Message> rhs_unpacked;
if (rhs_field != nullptr && IsAnyField(rhs_field)) {
rhs_packed = &rhs.GetReflection()->GetMessage(rhs, rhs_field);
} else if (rhs_field == nullptr && IsAny(rhs)) {
rhs_packed = &rhs;
}
if (rhs_packed != nullptr) {
CEL_RETURN_IF_ERROR(rhs_reflection_.any_reflection.Initialize(
rhs_packed->GetDescriptor()));
auto rhs_type_url = rhs_reflection_.any_reflection.GetTypeUrl(
*rhs_packed, rhs_scratch_);
if (!rhs_type_url.ConsumePrefix("type.googleapis.com/") &&
!rhs_type_url.ConsumePrefix("type.googleprod.com/")) {
return false;
}
if (rhs_type_url != "google.protobuf.Value" &&
rhs_type_url != "google.protobuf.Struct" &&
rhs_type_url != "google.protobuf.Any") {
return false;
}
CEL_ASSIGN_OR_RETURN(rhs_unpacked,
well_known_types::UnpackAnyIfResolveable(
&arena_, rhs_reflection_.any_reflection,
*rhs_packed, pool_, factory_));
if (rhs_unpacked) {
rhs_field = nullptr;
}
}
absl::Nonnull<const Message*> rhs_message =
rhs_field != nullptr
? &rhs.GetReflection()->GetMessage(rhs, rhs_field)
: rhs_unpacked != nullptr ? cel::to_address(rhs_unpacked)
: &rhs;
const auto* rhs_descriptor = rhs_message->GetDescriptor();
const auto rhs_well_known_type = rhs_descriptor->well_known_type();
switch (rhs_well_known_type) {
case Descriptor::WELLKNOWNTYPE_VALUE: {
CEL_RETURN_IF_ERROR(
rhs_reflection_.value_reflection.Initialize(rhs_descriptor));
if (rhs_reflection_.value_reflection.GetKindCase(*rhs_message) !=
google::protobuf::Value::kStructValue) {
return false;
}
CEL_RETURN_IF_ERROR(rhs_reflection_.struct_reflection.Initialize(
rhs_reflection_.value_reflection.GetStructDescriptor()));
return MapFieldEquals(
lhs, lhs_field,
rhs_reflection_.value_reflection.GetStructValue(*rhs_message),
rhs_reflection_.struct_reflection.GetFieldsDescriptor());
}
case Descriptor::WELLKNOWNTYPE_STRUCT: {
CEL_RETURN_IF_ERROR(
rhs_reflection_.struct_reflection.Initialize(rhs_descriptor));
return MapFieldEquals(
lhs, lhs_field, *rhs_message,
rhs_reflection_.struct_reflection.GetFieldsDescriptor());
}
default:
return false;
}
ABSL_UNREACHABLE();
}
if (rhs_field != nullptr && rhs_field->is_map()) {
      ABSL_DCHECK(lhs_field == nullptr || !lhs_field->is_map());
if (lhs_field != nullptr &&
(lhs_field->is_repeated() ||
lhs_field->type() != FieldDescriptor::TYPE_MESSAGE)) {
return false;
}
absl::Nullable<const Message*> lhs_packed = nullptr;
Unique<Message> lhs_unpacked;
if (lhs_field != nullptr && IsAnyField(lhs_field)) {
lhs_packed = &lhs.GetReflection()->GetMessage(lhs, lhs_field);
} else if (lhs_field == nullptr && IsAny(lhs)) {
lhs_packed = &lhs;
}
if (lhs_packed != nullptr) {
CEL_RETURN_IF_ERROR(lhs_reflection_.any_reflection.Initialize(
lhs_packed->GetDescriptor()));
auto lhs_type_url = lhs_reflection_.any_reflection.GetTypeUrl(
*lhs_packed, lhs_scratch_);
if (!lhs_type_url.ConsumePrefix("type.googleapis.com/") &&
!lhs_type_url.ConsumePrefix("type.googleprod.com/")) {
return false;
}
if (lhs_type_url != "google.protobuf.Value" &&
lhs_type_url != "google.protobuf.Struct" &&
lhs_type_url != "google.protobuf.Any") {
return false;
}
CEL_ASSIGN_OR_RETURN(lhs_unpacked,
well_known_types::UnpackAnyIfResolveable(
&arena_, lhs_reflection_.any_reflection,
*lhs_packed, pool_, factory_));
if (lhs_unpacked) {
lhs_field = nullptr;
}
}
absl::Nonnull<const Message*> lhs_message =
lhs_field != nullptr
? &lhs.GetReflection()->GetMessage(lhs, lhs_field)
: lhs_unpacked != nullptr ? cel::to_address(lhs_unpacked)
: &lhs;
const auto* lhs_descriptor = lhs_message->GetDescriptor();
const auto lhs_well_known_type = lhs_descriptor->well_known_type();
switch (lhs_well_known_type) {
case Descriptor::WELLKNOWNTYPE_VALUE: {
CEL_RETURN_IF_ERROR(
lhs_reflection_.value_reflection.Initialize(lhs_descriptor));
if (lhs_reflection_.value_reflection.GetKindCase(*lhs_message) !=
google::protobuf::Value::kStructValue) {
return false;
}
CEL_RETURN_IF_ERROR(lhs_reflection_.struct_reflection.Initialize(
lhs_reflection_.value_reflection.GetStructDescriptor()));
return MapFieldEquals(
lhs_reflection_.value_reflection.GetStructValue(*lhs_message),
lhs_reflection_.struct_reflection.GetFieldsDescriptor(), rhs,
rhs_field);
}
case Descriptor::WELLKNOWNTYPE_STRUCT: {
CEL_RETURN_IF_ERROR(
lhs_reflection_.struct_reflection.Initialize(lhs_descriptor));
return MapFieldEquals(
*lhs_message,
lhs_reflection_.struct_reflection.GetFieldsDescriptor(), rhs,
rhs_field);
}
default:
return false;
}
ABSL_UNREACHABLE();
}
    ABSL_DCHECK(lhs_field == nullptr || !lhs_field->is_map());
    ABSL_DCHECK(rhs_field == nullptr || !rhs_field->is_map());
if (lhs_field != nullptr && lhs_field->is_repeated()) {
if (rhs_field != nullptr && rhs_field->is_repeated()) {
return RepeatedFieldEquals(lhs, lhs_field, rhs, rhs_field);
}
if (rhs_field != nullptr &&
rhs_field->type() != FieldDescriptor::TYPE_MESSAGE) {
return false;
}
absl::Nullable<const Message*> rhs_packed = nullptr;
Unique<Message> rhs_unpacked;
if (rhs_field != nullptr && IsAnyField(rhs_field)) {
rhs_packed = &rhs.GetReflection()->GetMessage(rhs, rhs_field);
} else if (rhs_field == nullptr && IsAny(rhs)) {
rhs_packed = &rhs;
}
if (rhs_packed != nullptr) {
CEL_RETURN_IF_ERROR(rhs_reflection_.any_reflection.Initialize(
rhs_packed->GetDescriptor()));
auto rhs_type_url = rhs_reflection_.any_reflection.GetTypeUrl(
*rhs_packed, rhs_scratch_);
if (!rhs_type_url.ConsumePrefix("type.googleapis.com/") &&
!rhs_type_url.ConsumePrefix("type.googleprod.com/")) {
return false;
}
if (rhs_type_url != "google.protobuf.Value" &&
rhs_type_url != "google.protobuf.ListValue" &&
rhs_type_url != "google.protobuf.Any") {
return false;
}
CEL_ASSIGN_OR_RETURN(rhs_unpacked,
well_known_types::UnpackAnyIfResolveable(
&arena_, rhs_reflection_.any_reflection,
*rhs_packed, pool_, factory_));
if (rhs_unpacked) {
rhs_field = nullptr;
}
}
absl::Nonnull<const Message*> rhs_message =
rhs_field != nullptr
? &rhs.GetReflection()->GetMessage(rhs, rhs_field)
: rhs_unpacked != nullptr ? cel::to_address(rhs_unpacked)
: &rhs;
const auto* rhs_descriptor = rhs_message->GetDescriptor();
const auto rhs_well_known_type = rhs_descriptor->well_known_type();
switch (rhs_well_known_type) {
case Descriptor::WELLKNOWNTYPE_VALUE: {
CEL_RETURN_IF_ERROR(
rhs_reflection_.value_reflection.Initialize(rhs_descriptor));
if (rhs_reflection_.value_reflection.GetKindCase(*rhs_message) !=
google::protobuf::Value::kListValue) {
return false;
}
CEL_RETURN_IF_ERROR(rhs_reflection_.list_value_reflection.Initialize(
rhs_reflection_.value_reflection.GetListValueDescriptor()));
return RepeatedFieldEquals(
lhs, lhs_field,
rhs_reflection_.value_reflection.GetListValue(*rhs_message),
rhs_reflection_.list_value_reflection.GetValuesDescriptor());
}
case Descriptor::WELLKNOWNTYPE_LISTVALUE: {
CEL_RETURN_IF_ERROR(
rhs_reflection_.list_value_reflection.Initialize(rhs_descriptor));
return RepeatedFieldEquals(
lhs, lhs_field, *rhs_message,
rhs_reflection_.list_value_reflection.GetValuesDescriptor());
}
default:
return false;
}
ABSL_UNREACHABLE();
}
if (rhs_field != nullptr && rhs_field->is_repeated()) {
      ABSL_DCHECK(lhs_field == nullptr || !lhs_field->is_repeated());
if (lhs_field != nullptr &&
lhs_field->type() != FieldDescriptor::TYPE_MESSAGE) {
return false;
}
absl::Nullable<const Message*> lhs_packed = nullptr;
Unique<Message> lhs_unpacked;
if (lhs_field != nullptr && IsAnyField(lhs_field)) {
lhs_packed = &lhs.GetReflection()->GetMessage(lhs, lhs_field);
} else if (lhs_field == nullptr && IsAny(lhs)) {
lhs_packed = &lhs;
}
if (lhs_packed != nullptr) {
CEL_RETURN_IF_ERROR(lhs_reflection_.any_reflection.Initialize(
lhs_packed->GetDescriptor()));
auto lhs_type_url = lhs_reflection_.any_reflection.GetTypeUrl(
*lhs_packed, lhs_scratch_);
if (!lhs_type_url.ConsumePrefix("type.googleapis.com/") &&
!lhs_type_url.ConsumePrefix("type.googleprod.com/")) {
return false;
}
if (lhs_type_url != "google.protobuf.Value" &&
lhs_type_url != "google.protobuf.ListValue" &&
lhs_type_url != "google.protobuf.Any") {
return false;
}
CEL_ASSIGN_OR_RETURN(lhs_unpacked,
well_known_types::UnpackAnyIfResolveable(
&arena_, lhs_reflection_.any_reflection,
*lhs_packed, pool_, factory_));
if (lhs_unpacked) {
lhs_field = nullptr;
}
}
absl::Nonnull<const Message*> lhs_message =
lhs_field != nullptr
? &lhs.GetReflection()->GetMessage(lhs, lhs_field)
: lhs_unpacked != nullptr ? cel::to_address(lhs_unpacked)
: &lhs;
const auto* lhs_descriptor = lhs_message->GetDescriptor();
const auto lhs_well_known_type = lhs_descriptor->well_known_type();
switch (lhs_well_known_type) {
case Descriptor::WELLKNOWNTYPE_VALUE: {
CEL_RETURN_IF_ERROR(
lhs_reflection_.value_reflection.Initialize(lhs_descriptor));
if (lhs_reflection_.value_reflection.GetKindCase(*lhs_message) !=
google::protobuf::Value::kListValue) {
return false;
}
CEL_RETURN_IF_ERROR(lhs_reflection_.list_value_reflection.Initialize(
lhs_reflection_.value_reflection.GetListValueDescriptor()));
return RepeatedFieldEquals(
lhs_reflection_.value_reflection.GetListValue(*lhs_message),
lhs_reflection_.list_value_reflection.GetValuesDescriptor(), rhs,
rhs_field);
}
case Descriptor::WELLKNOWNTYPE_LISTVALUE: {
CEL_RETURN_IF_ERROR(
lhs_reflection_.list_value_reflection.Initialize(lhs_descriptor));
return RepeatedFieldEquals(
*lhs_message,
lhs_reflection_.list_value_reflection.GetValuesDescriptor(), rhs,
rhs_field);
}
default:
return false;
}
ABSL_UNREACHABLE();
}
return SingularFieldEquals(lhs, lhs_field, rhs, rhs_field);
}
private:
const absl::Nonnull<const DescriptorPool*> pool_;
const absl::Nonnull<MessageFactory*> factory_;
google::protobuf::Arena arena_;
EquatableValueReflection lhs_reflection_;
EquatableValueReflection rhs_reflection_;
std::string lhs_scratch_;
std::string rhs_scratch_;
};
}
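// Public entry points. Each call builds a fresh MessageEqualsState, so calls
// are independent. A minimal usage sketch (illustrative, not from this file;
// `lhs` and `rhs` are hypothetical messages):
//   absl::StatusOr<bool> same = MessageEquals(
//       lhs, rhs, google::protobuf::DescriptorPool::generated_pool(),
//       google::protobuf::MessageFactory::generated_factory());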
absl::StatusOr<bool> MessageEquals(const Message& lhs, const Message& rhs,
absl::Nonnull<const DescriptorPool*> pool,
absl::Nonnull<MessageFactory*> factory) {
ABSL_DCHECK(pool != nullptr);
ABSL_DCHECK(factory != nullptr);
if (&lhs == &rhs) {
return true;
}
return std::make_unique<MessageEqualsState>(pool, factory)->Equals(lhs, rhs);
}
absl::StatusOr<bool> MessageFieldEquals(
const Message& lhs, absl::Nonnull<const FieldDescriptor*> lhs_field,
const Message& rhs, absl::Nonnull<const FieldDescriptor*> rhs_field,
absl::Nonnull<const DescriptorPool*> pool,
absl::Nonnull<MessageFactory*> factory) {
ABSL_DCHECK(lhs_field != nullptr);
ABSL_DCHECK(rhs_field != nullptr);
ABSL_DCHECK(pool != nullptr);
ABSL_DCHECK(factory != nullptr);
if (&lhs == &rhs && lhs_field == rhs_field) {
return true;
}
return std::make_unique<MessageEqualsState>(pool, factory)
->FieldEquals(lhs, lhs_field, rhs, rhs_field);
}
absl::StatusOr<bool> MessageFieldEquals(
const google::protobuf::Message& lhs, const google::protobuf::Message& rhs,
absl::Nonnull<const google::protobuf::FieldDescriptor*> rhs_field,
absl::Nonnull<const google::protobuf::DescriptorPool*> pool,
absl::Nonnull<google::protobuf::MessageFactory*> factory) {
ABSL_DCHECK(rhs_field != nullptr);
ABSL_DCHECK(pool != nullptr);
ABSL_DCHECK(factory != nullptr);
return std::make_unique<MessageEqualsState>(pool, factory)
->FieldEquals(lhs, nullptr, rhs, rhs_field);
}
absl::StatusOr<bool> MessageFieldEquals(
const google::protobuf::Message& lhs,
absl::Nonnull<const google::protobuf::FieldDescriptor*> lhs_field,
const google::protobuf::Message& rhs,
absl::Nonnull<const google::protobuf::DescriptorPool*> pool,
absl::Nonnull<google::protobuf::MessageFactory*> factory) {
ABSL_DCHECK(lhs_field != nullptr);
ABSL_DCHECK(pool != nullptr);
ABSL_DCHECK(factory != nullptr);
return std::make_unique<MessageEqualsState>(pool, factory)
->FieldEquals(lhs, lhs_field, rhs, nullptr);
}
} | #include "internal/message_equality.h"
#include <string>
#include <utility>
#include <vector>
#include "google/protobuf/any.pb.h"
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/struct.pb.h"
#include "google/protobuf/timestamp.pb.h"
#include "google/protobuf/wrappers.pb.h"
#include "absl/base/nullability.h"
#include "absl/log/absl_check.h"
#include "absl/log/die_if_null.h"
#include "absl/status/status_matchers.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "common/allocator.h"
#include "common/memory.h"
#include "internal/message_type_name.h"
#include "internal/parse_text_proto.h"
#include "internal/testing.h"
#include "internal/testing_descriptor_pool.h"
#include "internal/testing_message_factory.h"
#include "internal/well_known_types.h"
#include "proto/test/v1/proto3/test_all_types.pb.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
namespace cel::internal {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::testing::IsFalse;
using ::testing::IsTrue;
using ::testing::TestParamInfo;
using ::testing::TestWithParam;
using ::testing::ValuesIn;
using TestAllTypesProto3 = ::google::api::expr::test::v1::proto3::TestAllTypes;
template <typename T>
Owned<google::protobuf::Message> ParseTextProto(absl::string_view text) {
return DynamicParseTextProto<T>(NewDeleteAllocator(), text,
GetTestingDescriptorPool(),
GetTestingMessageFactory());
}
struct UnaryMessageEqualsTestParam {
std::string name;
std::vector<Owned<google::protobuf::Message>> ops;
bool equal;
};
std::string UnaryMessageEqualsTestParamName(
const TestParamInfo<UnaryMessageEqualsTestParam>& param_info) {
return param_info.param.name;
}
using UnaryMessageEqualsTest = TestWithParam<UnaryMessageEqualsTestParam>;
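// Serializes `message` into a freshly created google.protobuf.Any so each
// test case can also be exercised through the Any unpacking path.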
Owned<google::protobuf::Message> PackMessage(const google::protobuf::Message& message) {
const auto* descriptor =
ABSL_DIE_IF_NULL(GetTestingDescriptorPool()->FindMessageTypeByName(
MessageTypeNameFor<google::protobuf::Any>()));
const auto* prototype =
ABSL_DIE_IF_NULL(GetTestingMessageFactory()->GetPrototype(descriptor));
auto instance = WrapShared(prototype->New(), NewDeleteAllocator());
auto reflection = well_known_types::GetAnyReflectionOrDie(descriptor);
reflection.SetTypeUrl(
cel::to_address(instance),
absl::StrCat("type.googleapis.com/", message.GetTypeName()));
absl::Cord value;
ABSL_CHECK(message.SerializeToCord(&value));
reflection.SetValue(cel::to_address(instance), value);
return instance;
}
TEST_P(UnaryMessageEqualsTest, Equals) {
const auto* pool = GetTestingDescriptorPool();
auto* factory = GetTestingMessageFactory();
const auto& test_case = GetParam();
for (const auto& lhs : test_case.ops) {
for (const auto& rhs : test_case.ops) {
if (!test_case.equal && &lhs == &rhs) {
continue;
}
EXPECT_THAT(MessageEquals(*lhs, *rhs, pool, factory),
IsOkAndHolds(test_case.equal))
<< *lhs << " " << *rhs;
EXPECT_THAT(MessageEquals(*rhs, *lhs, pool, factory),
IsOkAndHolds(test_case.equal))
<< *lhs << " " << *rhs;
auto lhs_any = PackMessage(*lhs);
auto rhs_any = PackMessage(*rhs);
EXPECT_THAT(MessageEquals(*lhs_any, *rhs, pool, factory),
IsOkAndHolds(test_case.equal))
<< *lhs_any << " " << *rhs;
EXPECT_THAT(MessageEquals(*lhs, *rhs_any, pool, factory),
IsOkAndHolds(test_case.equal))
<< *lhs << " " << *rhs_any;
EXPECT_THAT(MessageEquals(*lhs_any, *rhs_any, pool, factory),
IsOkAndHolds(test_case.equal))
<< *lhs_any << " " << *rhs_any;
}
}
}
INSTANTIATE_TEST_SUITE_P(
UnaryMessageEqualsTest, UnaryMessageEqualsTest,
ValuesIn<UnaryMessageEqualsTestParam>({
{
.name = "NullValue_Equal",
.ops =
{
ParseTextProto<google::protobuf::Value>(R"pb()pb"),
ParseTextProto<google::protobuf::Value>(
R"pb(null_value: NULL_VALUE)pb"),
},
.equal = true,
},
{
.name = "BoolValue_False_Equal",
.ops =
{
ParseTextProto<google::protobuf::BoolValue>(R"pb()pb"),
ParseTextProto<google::protobuf::BoolValue>(
R"pb(value: false)pb"),
ParseTextProto<google::protobuf::Value>(
R"pb(bool_value: false)pb"),
},
.equal = true,
},
{
.name = "BoolValue_True_Equal",
.ops =
{
ParseTextProto<google::protobuf::BoolValue>(
R"pb(value: true)pb"),
ParseTextProto<google::protobuf::Value>(R"pb(bool_value:
true)pb"),
},
.equal = true,
},
{
.name = "StringValue_Empty_Equal",
.ops =
{
ParseTextProto<google::protobuf::StringValue>(R"pb()pb"),
ParseTextProto<google::protobuf::StringValue>(
R"pb(value: "")pb"),
ParseTextProto<google::protobuf::Value>(
R"pb(string_value: "")pb"),
},
.equal = true,
},
{
.name = "StringValue_Equal",
.ops =
{
ParseTextProto<google::protobuf::StringValue>(
R"pb(value: "foo")pb"),
ParseTextProto<google::protobuf::Value>(
R"pb(string_value: "foo")pb"),
},
.equal = true,
},
{
.name = "BytesValue_Empty_Equal",
.ops =
{
ParseTextProto<google::protobuf::BytesValue>(R"pb()pb"),
ParseTextProto<google::protobuf::BytesValue>(
R"pb(value: "")pb"),
},
.equal = true,
},
{
.name = "BytesValue_Equal",
.ops =
{
ParseTextProto<google::protobuf::BytesValue>(
R"pb(value: "foo")pb"),
ParseTextProto<google::protobuf::BytesValue>(
R"pb(value: "foo")pb"),
},
.equal = true,
},
{
.name = "ListValue_Equal",
.ops =
{
ParseTextProto<google::protobuf::Value>(
R"pb(list_value: { values { bool_value: true } })pb"),
ParseTextProto<google::protobuf::ListValue>(
R"pb(values { bool_value: true })pb"),
},
.equal = true,
},
{
.name = "ListValue_NotEqual",
.ops =
{
ParseTextProto<google::protobuf::Value>(
R"pb(list_value: { values { number_value: 0.0 } })pb"),
ParseTextProto<google::protobuf::ListValue>(
R"pb(values { number_value: 1.0 })pb"),
ParseTextProto<google::protobuf::Value>(
R"pb(list_value: { values { number_value: 2.0 } })pb"),
ParseTextProto<google::protobuf::ListValue>(
R"pb(values { number_value: 3.0 })pb"),
},
.equal = false,
},
{
.name = "StructValue_Equal",
.ops =
{
ParseTextProto<google::protobuf::Value>(
R"pb(struct_value: {
fields {
key: "foo"
value: { bool_value: true }
}
})pb"),
ParseTextProto<google::protobuf::Struct>(
R"pb(fields {
key: "foo"
value: { bool_value: true }
})pb"),
},
.equal = true,
},
{
.name = "StructValue_NotEqual",
.ops =
{
ParseTextProto<google::protobuf::Value>(
R"pb(struct_value: {
fields {
key: "foo"
value: { number_value: 0.0 }
}
})pb"),
ParseTextProto<google::protobuf::Struct>(
R"pb(
fields {
key: "bar"
value: { number_value: 0.0 }
})pb"),
ParseTextProto<google::protobuf::Value>(
R"pb(struct_value: {
fields {
key: "foo"
value: { number_value: 1.0 }
}
})pb"),
ParseTextProto<google::protobuf::Struct>(
R"pb(
fields {
key: "bar"
value: { number_value: 1.0 }
})pb"),
},
.equal = false,
},
{
.name = "Heterogeneous_Equal",
.ops =
{
ParseTextProto<google::protobuf::Int32Value>(R"pb()pb"),
ParseTextProto<google::protobuf::Int64Value>(R"pb()pb"),
ParseTextProto<google::protobuf::UInt32Value>(R"pb()pb"),
ParseTextProto<google::protobuf::UInt64Value>(R"pb()pb"),
ParseTextProto<google::protobuf::FloatValue>(R"pb()pb"),
ParseTextProto<google::protobuf::DoubleValue>(R"pb()pb"),
ParseTextProto<google::protobuf::Value>(R"pb(number_value:
0.0)pb"),
},
.equal = true,
},
{
.name = "Message_Equals",
.ops =
{
ParseTextProto<TestAllTypesProto3>(R"pb()pb"),
ParseTextProto<TestAllTypesProto3>(R"pb()pb"),
},
.equal = true,
},
{
.name = "Heterogeneous_NotEqual",
.ops =
{
ParseTextProto<google::protobuf::BoolValue>(
R"pb(value: false)pb"),
ParseTextProto<google::protobuf::Int32Value>(
R"pb(value: 0)pb"),
ParseTextProto<google::protobuf::Int64Value>(
R"pb(value: 1)pb"),
ParseTextProto<google::protobuf::UInt32Value>(
R"pb(value: 2)pb"),
ParseTextProto<google::protobuf::UInt64Value>(
R"pb(value: 3)pb"),
ParseTextProto<google::protobuf::FloatValue>(
R"pb(value: 4.0)pb"),
ParseTextProto<google::protobuf::DoubleValue>(
R"pb(value: 5.0)pb"),
ParseTextProto<google::protobuf::Value>(R"pb()pb"),
ParseTextProto<google::protobuf::Value>(R"pb(bool_value:
true)pb"),
ParseTextProto<google::protobuf::Value>(R"pb(number_value:
6.0)pb"),
ParseTextProto<google::protobuf::Value>(
R"pb(string_value: "bar")pb"),
ParseTextProto<google::protobuf::BytesValue>(
R"pb(value: "foo")pb"),
ParseTextProto<google::protobuf::StringValue>(
R"pb(value: "")pb"),
ParseTextProto<google::protobuf::StringValue>(
R"pb(value: "foo")pb"),
ParseTextProto<google::protobuf::Value>(
R"pb(list_value: {})pb"),
ParseTextProto<google::protobuf::ListValue>(
R"pb(values { bool_value: true })pb"),
ParseTextProto<google::protobuf::Value>(R"pb(struct_value:
{})pb"),
ParseTextProto<google::protobuf::Struct>(
R"pb(fields {
key: "foo"
value: { bool_value: false }
})pb"),
ParseTextProto<google::protobuf::Duration>(R"pb()pb"),
ParseTextProto<google::protobuf::Duration>(
R"pb(seconds: 1 nanos: 1)pb"),
ParseTextProto<google::protobuf::Timestamp>(R"pb()pb"),
ParseTextProto<google::protobuf::Timestamp>(
R"pb(seconds: 1 nanos: 1)pb"),
ParseTextProto<TestAllTypesProto3>(R"pb()pb"),
ParseTextProto<TestAllTypesProto3>(
R"pb(single_bool: true)pb"),
},
.equal = false,
},
}),
UnaryMessageEqualsTestParamName);
struct UnaryMessageFieldEqualsTestParam {
std::string name;
std::string message;
std::vector<std::string> fields;
bool equal;
};
std::string UnaryMessageFieldEqualsTestParamName(
const TestParamInfo<UnaryMessageFieldEqualsTestParam>& param_info) {
return param_info.param.name;
}
using UnaryMessageFieldEqualsTest =
TestWithParam<UnaryMessageFieldEqualsTestParam>;
void PackMessageTo(const google::protobuf::Message& message, google::protobuf::Message* instance) {
auto reflection =
*well_known_types::GetAnyReflection(instance->GetDescriptor());
reflection.SetTypeUrl(
instance, absl::StrCat("type.googleapis.com/", message.GetTypeName()));
absl::Cord value;
ABSL_CHECK(message.SerializeToCord(&value));
reflection.SetValue(instance, value);
}
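// Repacks a singular or repeated message field of TestAllTypes into the
// corresponding single_any/repeated_any field, returning the packed message
// and field, or nullopt for shapes that cannot be packed (maps and
// non-message fields).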
absl::optional<std::pair<Owned<google::protobuf::Message>,
absl::Nonnull<const google::protobuf::FieldDescriptor*>>>
PackTestAllTypesProto3Field(
const google::protobuf::Message& message,
absl::Nonnull<const google::protobuf::FieldDescriptor*> field) {
if (field->is_map()) {
return absl::nullopt;
}
if (field->is_repeated() &&
field->type() == google::protobuf::FieldDescriptor::TYPE_MESSAGE) {
const auto* descriptor = message.GetDescriptor();
const auto* any_field = descriptor->FindFieldByName("repeated_any");
auto packed = WrapShared(message.New(), NewDeleteAllocator());
const int size = message.GetReflection()->FieldSize(message, field);
for (int i = 0; i < size; ++i) {
PackMessageTo(
message.GetReflection()->GetRepeatedMessage(message, field, i),
packed->GetReflection()->AddMessage(cel::to_address(packed),
any_field));
}
return std::pair{packed, any_field};
}
if (!field->is_repeated() &&
field->type() == google::protobuf::FieldDescriptor::TYPE_MESSAGE) {
const auto* descriptor = message.GetDescriptor();
const auto* any_field = descriptor->FindFieldByName("single_any");
auto packed = WrapShared(message.New(), NewDeleteAllocator());
PackMessageTo(message.GetReflection()->GetMessage(message, field),
packed->GetReflection()->MutableMessage(
cel::to_address(packed), any_field));
return std::pair{packed, any_field};
}
return absl::nullopt;
}
TEST_P(UnaryMessageFieldEqualsTest, Equals) {
const auto* pool = GetTestingDescriptorPool();
auto* factory = GetTestingMessageFactory();
const auto& test_case = GetParam();
auto lhs_message = ParseTextProto<TestAllTypesProto3>(test_case.message);
auto rhs_message = ParseTextProto<TestAllTypesProto3>(test_case.message);
const auto* descriptor = ABSL_DIE_IF_NULL(
pool->FindMessageTypeByName(MessageTypeNameFor<TestAllTypesProto3>()));
for (const auto& lhs : test_case.fields) {
for (const auto& rhs : test_case.fields) {
if (!test_case.equal && lhs == rhs) {
continue;
}
const auto* lhs_field =
ABSL_DIE_IF_NULL(descriptor->FindFieldByName(lhs));
const auto* rhs_field =
ABSL_DIE_IF_NULL(descriptor->FindFieldByName(rhs));
EXPECT_THAT(MessageFieldEquals(*lhs_message, lhs_field, *rhs_message,
rhs_field, pool, factory),
IsOkAndHolds(test_case.equal))
<< *lhs_message << " " << lhs_field->name() << " " << *rhs_message
<< " " << rhs_field->name();
EXPECT_THAT(MessageFieldEquals(*rhs_message, rhs_field, *lhs_message,
lhs_field, pool, factory),
IsOkAndHolds(test_case.equal))
<< *lhs_message << " " << lhs_field->name() << " " << *rhs_message
<< " " << rhs_field->name();
if (!lhs_field->is_repeated() &&
lhs_field->type() == google::protobuf::FieldDescriptor::TYPE_MESSAGE) {
EXPECT_THAT(MessageFieldEquals(lhs_message->GetReflection()->GetMessage(
*lhs_message, lhs_field),
*rhs_message, rhs_field, pool, factory),
IsOkAndHolds(test_case.equal))
<< *lhs_message << " " << lhs_field->name() << " " << *rhs_message
<< " " << rhs_field->name();
EXPECT_THAT(MessageFieldEquals(*rhs_message, rhs_field,
lhs_message->GetReflection()->GetMessage(
*lhs_message, lhs_field),
pool, factory),
IsOkAndHolds(test_case.equal))
<< *lhs_message << " " << lhs_field->name() << " " << *rhs_message
<< " " << rhs_field->name();
}
if (!rhs_field->is_repeated() &&
rhs_field->type() == google::protobuf::FieldDescriptor::TYPE_MESSAGE) {
EXPECT_THAT(MessageFieldEquals(*lhs_message, lhs_field,
rhs_message->GetReflection()->GetMessage(
*rhs_message, rhs_field),
pool, factory),
IsOkAndHolds(test_case.equal))
<< *lhs_message << " " << lhs_field->name() << " " << *rhs_message
<< " " << rhs_field->name();
EXPECT_THAT(MessageFieldEquals(rhs_message->GetReflection()->GetMessage(
*rhs_message, rhs_field),
*lhs_message, lhs_field, pool, factory),
IsOkAndHolds(test_case.equal))
<< *lhs_message << " " << lhs_field->name() << " " << *rhs_message
<< " " << rhs_field->name();
}
absl::optional<std::pair<Owned<google::protobuf::Message>,
absl::Nonnull<const google::protobuf::FieldDescriptor*>>>
lhs_any = PackTestAllTypesProto3Field(*lhs_message, lhs_field);
absl::optional<std::pair<Owned<google::protobuf::Message>,
absl::Nonnull<const google::protobuf::FieldDescriptor*>>>
rhs_any = PackTestAllTypesProto3Field(*rhs_message, rhs_field);
if (lhs_any) {
EXPECT_THAT(MessageFieldEquals(*lhs_any->first, lhs_any->second,
*rhs_message, rhs_field, pool, factory),
IsOkAndHolds(test_case.equal))
<< *lhs_any->first << " " << *rhs_message;
if (!lhs_any->second->is_repeated()) {
EXPECT_THAT(
MessageFieldEquals(lhs_any->first->GetReflection()->GetMessage(
*lhs_any->first, lhs_any->second),
*rhs_message, rhs_field, pool, factory),
IsOkAndHolds(test_case.equal))
<< *lhs_any->first << " " << *rhs_message;
}
}
if (rhs_any) {
EXPECT_THAT(MessageFieldEquals(*lhs_message, lhs_field, *rhs_any->first,
rhs_any->second, pool, factory),
IsOkAndHolds(test_case.equal))
<< *lhs_message << " " << *rhs_any->first;
if (!rhs_any->second->is_repeated()) {
EXPECT_THAT(
MessageFieldEquals(*lhs_message, lhs_field,
rhs_any->first->GetReflection()->GetMessage(
*rhs_any->first, rhs_any->second),
pool, factory),
IsOkAndHolds(test_case.equal))
<< *lhs_message << " " << *rhs_any->first;
}
}
if (lhs_any && rhs_any) {
EXPECT_THAT(
MessageFieldEquals(*lhs_any->first, lhs_any->second,
*rhs_any->first, rhs_any->second, pool, factory),
IsOkAndHolds(test_case.equal))
            << *lhs_any->first << " " << *rhs_any->first;
}
}
}
}
INSTANTIATE_TEST_SUITE_P(
UnaryMessageFieldEqualsTest, UnaryMessageFieldEqualsTest,
ValuesIn<UnaryMessageFieldEqualsTestParam>({
{
.name = "Heterogeneous_Single_Equal",
.message = R"pb(
single_int32: 1
single_int64: 1
single_uint32: 1
single_uint64: 1
single_float: 1
single_double: 1
single_value: { number_value: 1 }
single_int32_wrapper: { value: 1 }
single_int64_wrapper: { value: 1 }
single_uint32_wrapper: { value: 1 }
single_uint64_wrapper: { value: 1 }
single_float_wrapper: { value: 1 }
single_double_wrapper: { value: 1 }
standalone_enum: BAR
)pb",
.fields =
{
"single_int32",
"single_int64",
"single_uint32",
"single_uint64",
"single_float",
"single_double",
"single_value",
"single_int32_wrapper",
"single_int64_wrapper",
"single_uint32_wrapper",
"single_uint64_wrapper",
"single_float_wrapper",
"single_double_wrapper",
"standalone_enum",
},
.equal = true,
},
{
.name = "Heterogeneous_Single_NotEqual",
.message = R"pb(
null_value: NULL_VALUE
single_bool: false
single_int32: 2
single_int64: 3
single_uint32: 4
single_uint64: 5
single_float: NaN
single_double: NaN
single_string: "foo"
single_bytes: "foo"
single_value: { number_value: 8 }
single_int32_wrapper: { value: 9 }
single_int64_wrapper: { value: 10 }
single_uint32_wrapper: { value: 11 }
single_uint64_wrapper: { value: 12 }
single_float_wrapper: { value: 13 }
single_double_wrapper: { value: 14 }
single_string_wrapper: { value: "bar" }
single_bytes_wrapper: { value: "bar" }
standalone_enum: BAR
)pb",
.fields =
{
"null_value",
"single_bool",
"single_int32",
"single_int64",
"single_uint32",
"single_uint64",
"single_float",
"single_double",
"single_string",
"single_bytes",
"single_value",
"single_int32_wrapper",
"single_int64_wrapper",
"single_uint32_wrapper",
"single_uint64_wrapper",
"single_float_wrapper",
"single_double_wrapper",
"standalone_enum",
},
.equal = false,
},
{
.name = "Heterogeneous_Repeated_Equal",
.message = R"pb(
repeated_int32: 1
repeated_int64: 1
repeated_uint32: 1
repeated_uint64: 1
repeated_float: 1
repeated_double: 1
repeated_value: { number_value: 1 }
repeated_int32_wrapper: { value: 1 }
repeated_int64_wrapper: { value: 1 }
repeated_uint32_wrapper: { value: 1 }
repeated_uint64_wrapper: { value: 1 }
repeated_float_wrapper: { value: 1 }
repeated_double_wrapper: { value: 1 }
repeated_nested_enum: BAR
single_value: { list_value: { values { number_value: 1 } } }
list_value: { values { number_value: 1 } }
)pb",
.fields =
{
"repeated_int32",
"repeated_int64",
"repeated_uint32",
"repeated_uint64",
"repeated_float",
"repeated_double",
"repeated_value",
"repeated_int32_wrapper",
"repeated_int64_wrapper",
"repeated_uint32_wrapper",
"repeated_uint64_wrapper",
"repeated_float_wrapper",
"repeated_double_wrapper",
"repeated_nested_enum",
"single_value",
"list_value",
},
.equal = true,
},
{
.name = "Heterogeneous_Repeated_NotEqual",
.message = R"pb(
repeated_null_value: NULL_VALUE
repeated_bool: false
repeated_int32: 2
repeated_int64: 3
repeated_uint32: 4
repeated_uint64: 5
repeated_float: 6
repeated_double: 7
repeated_string: "foo"
repeated_bytes: "foo"
repeated_value: { number_value: 8 }
repeated_int32_wrapper: { value: 9 }
repeated_int64_wrapper: { value: 10 }
repeated_uint32_wrapper: { value: 11 }
repeated_uint64_wrapper: { value: 12 }
repeated_float_wrapper: { value: 13 }
repeated_double_wrapper: { value: 14 }
repeated_string_wrapper: { value: "bar" }
repeated_bytes_wrapper: { value: "bar" }
repeated_nested_enum: BAR
)pb",
.fields =
{
"repeated_null_value",
"repeated_bool",
"repeated_int32",
"repeated_int64",
"repeated_uint32",
"repeated_uint64",
"repeated_float",
"repeated_double",
"repeated_string",
"repeated_bytes",
"repeated_value",
"repeated_int32_wrapper",
"repeated_int64_wrapper",
"repeated_uint32_wrapper",
"repeated_uint64_wrapper",
"repeated_float_wrapper",
"repeated_double_wrapper",
"repeated_nested_enum",
},
.equal = false,
},
{
.name = "Heterogeneous_Map_Equal",
.message = R"pb(
map_int32_int32 { key: 1 value: 1 }
map_int32_uint32 { key: 1 value: 1 }
map_int32_int64 { key: 1 value: 1 }
map_int32_uint64 { key: 1 value: 1 }
map_int32_float { key: 1 value: 1 }
map_int32_double { key: 1 value: 1 }
map_int32_enum { key: 1 value: BAR }
map_int32_value {
key: 1
value: { number_value: 1 }
}
map_int32_int32_wrapper {
key: 1
value: { value: 1 }
}
map_int32_uint32_wrapper {
key: 1
value: { value: 1 }
}
map_int32_int64_wrapper {
key: 1
value: { value: 1 }
}
map_int32_uint64_wrapper {
key: 1
value: { value: 1 }
}
map_int32_float_wrapper {
key: 1
value: { value: 1 }
}
map_int32_double_wrapper {
key: 1
value: { value: 1 }
}
map_int64_int32 { key: 1 value: 1 }
map_int64_uint32 { key: 1 value: 1 }
map_int64_int64 { key: 1 value: 1 }
map_int64_uint64 { key: 1 value: 1 }
map_int64_float { key: 1 value: 1 }
map_int64_double { key: 1 value: 1 }
map_int64_enum { key: 1 value: BAR }
map_int64_value {
key: 1
value: { number_value: 1 }
}
map_int64_int32_wrapper {
key: 1
value: { value: 1 }
}
map_int64_uint32_wrapper {
key: 1
value: { value: 1 }
}
map_int64_int64_wrapper {
key: 1
value: { value: 1 }
}
map_int64_uint64_wrapper {
key: 1
value: { value: 1 }
}
map_int64_float_wrapper {
key: 1
value: { value: 1 }
}
map_int64_double_wrapper {
key: 1
value: { value: 1 }
}
map_uint32_int32 { key: 1 value: 1 }
map_uint32_uint32 { key: 1 value: 1 }
map_uint32_int64 { key: 1 value: 1 }
map_uint32_uint64 { key: 1 value: 1 }
map_uint32_float { key: 1 value: 1 }
map_uint32_double { key: 1 value: 1 }
map_uint32_enum { key: 1 value: BAR }
map_uint32_value {
key: 1
value: { number_value: 1 }
}
map_uint32_int32_wrapper {
key: 1
value: { value: 1 }
}
map_uint32_uint32_wrapper {
key: 1
value: { value: 1 }
}
map_uint32_int64_wrapper {
key: 1
value: { value: 1 }
}
map_uint32_uint64_wrapper {
key: 1
value: { value: 1 }
}
map_uint32_float_wrapper {
key: 1
value: { value: 1 }
}
map_uint32_double_wrapper {
key: 1
value: { value: 1 }
}
map_uint64_int32 { key: 1 value: 1 }
map_uint64_uint32 { key: 1 value: 1 }
map_uint64_int64 { key: 1 value: 1 }
map_uint64_uint64 { key: 1 value: 1 }
map_uint64_float { key: 1 value: 1 }
map_uint64_double { key: 1 value: 1 }
map_uint64_enum { key: 1 value: BAR }
map_uint64_value {
key: 1
value: { number_value: 1 }
}
map_uint64_int32_wrapper {
key: 1
value: { value: 1 }
}
map_uint64_uint32_wrapper {
key: 1
value: { value: 1 }
}
map_uint64_int64_wrapper {
key: 1
value: { value: 1 }
}
map_uint64_uint64_wrapper {
key: 1
value: { value: 1 }
}
map_uint64_float_wrapper {
key: 1
value: { value: 1 }
}
map_uint64_double_wrapper {
key: 1
value: { value: 1 }
}
)pb",
.fields =
{
"map_int32_int32", "map_int32_uint32",
"map_int32_int64", "map_int32_uint64",
"map_int32_float", "map_int32_double",
"map_int32_enum", "map_int32_value",
"map_int32_int32_wrapper", "map_int32_uint32_wrapper",
"map_int32_int64_wrapper", "map_int32_uint64_wrapper",
"map_int32_float_wrapper", "map_int32_double_wrapper",
"map_int64_int32", "map_int64_uint32",
"map_int64_int64", "map_int64_uint64",
"map_int64_float", "map_int64_double",
"map_int64_enum", "map_int64_value",
"map_int64_int32_wrapper", "map_int64_uint32_wrapper",
"map_int64_int64_wrapper", "map_int64_uint64_wrapper",
"map_int64_float_wrapper", "map_int64_double_wrapper",
"map_uint32_int32", "map_uint32_uint32",
"map_uint32_int64", "map_uint32_uint64",
"map_uint32_float", "map_uint32_double",
"map_uint32_enum", "map_uint32_value",
"map_uint32_int32_wrapper", "map_uint32_uint32_wrapper",
"map_uint32_int64_wrapper", "map_uint32_uint64_wrapper",
"map_uint32_float_wrapper", "map_uint32_double_wrapper",
"map_uint64_int32", "map_uint64_uint32",
"map_uint64_int64", "map_uint64_uint64",
"map_uint64_float", "map_uint64_double",
"map_uint64_enum", "map_uint64_value",
"map_uint64_int32_wrapper", "map_uint64_uint32_wrapper",
"map_uint64_int64_wrapper", "map_uint64_uint64_wrapper",
"map_uint64_float_wrapper", "map_uint64_double_wrapper",
},
.equal = true,
},
{
.name = "Heterogeneous_Map_NotEqual",
.message = R"pb(
map_bool_bool { key: false value: false }
map_bool_int32 { key: false value: 1 }
map_bool_uint32 { key: false value: 0 }
map_int32_int32 { key: 0x7FFFFFFF value: 1 }
map_int64_int64 { key: 0x7FFFFFFFFFFFFFFF value: 1 }
map_uint32_uint32 { key: 0xFFFFFFFF value: 1 }
map_uint64_uint64 { key: 0xFFFFFFFFFFFFFFFF value: 1 }
map_string_string { key: "foo" value: "bar" }
map_string_bytes { key: "foo" value: "bar" }
map_int32_bytes { key: -2147483648 value: "bar" }
map_int64_bytes { key: -9223372036854775808 value: "bar" }
map_int32_float { key: -2147483648 value: 1 }
map_int64_double { key: -9223372036854775808 value: 1 }
map_uint32_string { key: 0xFFFFFFFF value: "bar" }
map_uint64_string { key: 0xFFFFFFFF value: "foo" }
map_uint32_bytes { key: 0xFFFFFFFF value: "bar" }
map_uint64_bytes { key: 0xFFFFFFFF value: "foo" }
map_uint32_bool { key: 0xFFFFFFFF value: false }
map_uint64_bool { key: 0xFFFFFFFF value: true }
single_value: {
struct_value: {
fields {
key: "bar"
value: { string_value: "foo" }
}
}
}
single_struct: {
fields {
key: "baz"
value: { string_value: "foo" }
}
}
standalone_message: {}
)pb",
.fields =
{
"map_bool_bool", "map_bool_int32",
"map_bool_uint32", "map_int32_int32",
"map_int64_int64", "map_uint32_uint32",
"map_uint64_uint64", "map_string_string",
"map_string_bytes", "map_int32_bytes",
"map_int64_bytes", "map_int32_float",
"map_int64_double", "map_uint32_string",
"map_uint64_string", "map_uint32_bytes",
"map_uint64_bytes", "map_uint32_bool",
"map_uint64_bool", "single_value",
"single_struct", "standalone_message",
},
.equal = false,
},
}),
UnaryMessageFieldEqualsTestParamName);
TEST(MessageEquals, AnyFallback) {
const auto* pool = GetTestingDescriptorPool();
auto* factory = GetTestingMessageFactory();
google::protobuf::Arena arena;
auto message1 = DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb(single_any: {
type_url: "type.googleapis.com/message.that.does.not.Exist"
value: "foo"
})pb",
pool, factory);
auto message2 = DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb(single_any: {
type_url: "type.googleapis.com/message.that.does.not.Exist"
value: "foo"
})pb",
pool, factory);
auto message3 = DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb(single_any: {
type_url: "type.googleapis.com/message.that.does.not.Exist"
value: "bar"
})pb",
pool, factory);
EXPECT_THAT(MessageEquals(*message1, *message2, pool, factory),
IsOkAndHolds(IsTrue()));
EXPECT_THAT(MessageEquals(*message2, *message1, pool, factory),
IsOkAndHolds(IsTrue()));
EXPECT_THAT(MessageEquals(*message1, *message3, pool, factory),
IsOkAndHolds(IsFalse()));
EXPECT_THAT(MessageEquals(*message3, *message1, pool, factory),
IsOkAndHolds(IsFalse()));
}
TEST(MessageFieldEquals, AnyFallback) {
const auto* pool = GetTestingDescriptorPool();
auto* factory = GetTestingMessageFactory();
google::protobuf::Arena arena;
auto message1 = DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb(single_any: {
type_url: "type.googleapis.com/message.that.does.not.Exist"
value: "foo"
})pb",
pool, factory);
auto message2 = DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb(single_any: {
type_url: "type.googleapis.com/message.that.does.not.Exist"
value: "foo"
})pb",
pool, factory);
auto message3 = DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb(single_any: {
type_url: "type.googleapis.com/message.that.does.not.Exist"
value: "bar"
})pb",
pool, factory);
EXPECT_THAT(MessageFieldEquals(
*message1,
ABSL_DIE_IF_NULL(
message1->GetDescriptor()->FindFieldByName("single_any")),
*message2,
ABSL_DIE_IF_NULL(
message2->GetDescriptor()->FindFieldByName("single_any")),
pool, factory),
IsOkAndHolds(IsTrue()));
EXPECT_THAT(MessageFieldEquals(
*message2,
ABSL_DIE_IF_NULL(
message2->GetDescriptor()->FindFieldByName("single_any")),
*message1,
ABSL_DIE_IF_NULL(
message1->GetDescriptor()->FindFieldByName("single_any")),
pool, factory),
IsOkAndHolds(IsTrue()));
EXPECT_THAT(MessageFieldEquals(
*message1,
ABSL_DIE_IF_NULL(
message1->GetDescriptor()->FindFieldByName("single_any")),
*message3,
ABSL_DIE_IF_NULL(
message3->GetDescriptor()->FindFieldByName("single_any")),
pool, factory),
IsOkAndHolds(IsFalse()));
EXPECT_THAT(MessageFieldEquals(
*message3,
ABSL_DIE_IF_NULL(
message3->GetDescriptor()->FindFieldByName("single_any")),
*message1,
ABSL_DIE_IF_NULL(
message1->GetDescriptor()->FindFieldByName("single_any")),
pool, factory),
IsOkAndHolds(IsFalse()));
}
}
}
| https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/message_equality.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/message_equality_test.cc | 4552db5798fb0853b131b783d8875794334fae7f
2896c2b7-5f4b-4da3-aa65-c05452830b3a | cpp | tensorflow/tensorflow | mkl_quantized_conv_ops | tensorflow/core/kernels/mkl/mkl_quantized_conv_ops.h | tensorflow/core/kernels/mkl/mkl_quantized_conv_ops_test.cc |
#ifndef TENSORFLOW_CORE_KERNELS_MKL_MKL_QUANTIZED_CONV_OPS_H_
#define TENSORFLOW_CORE_KERNELS_MKL_MKL_QUANTIZED_CONV_OPS_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor.h"
#ifdef INTEL_MKL
namespace tensorflow {
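// Width of one quantization step for type T over [range_min, range_max].
// For quint8 this is (range_max - range_min) / 255; for qint8 the lowest
// representable value is bumped from -128 to -127 to keep the range
// symmetric, giving (range_max - range_min) / 254.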
template <class T>
float MklFloatForOneQuantizedLevel(float range_min, float range_max) {
int64 highest = static_cast<int64_t>(Eigen::NumTraits<T>::highest());
int64 lowest = static_cast<int64_t>(Eigen::NumTraits<T>::lowest());
if (lowest < -highest) ++lowest;
const float float_for_one_quantized_level =
(range_max - range_min) / (highest - lowest);
return float_for_one_quantized_level;
}
template <class T1, class T2, class T3>
void MklQuantizationRangeForMultiplication(float min_a, float max_a,
float min_b, float max_b,
float* min_c, float* max_c) {
const float a_float_for_one_quant_level =
MklFloatForOneQuantizedLevel<T1>(min_a, max_a);
const float b_float_for_one_quant_level =
MklFloatForOneQuantizedLevel<T2>(min_b, max_b);
const int64 c_highest = static_cast<int64_t>(Eigen::NumTraits<T3>::highest());
const int64 c_lowest = static_cast<int64_t>(Eigen::NumTraits<T3>::lowest());
const float c_float_for_one_quant_level =
a_float_for_one_quant_level * b_float_for_one_quant_level;
*min_c = c_float_for_one_quant_level * c_lowest;
*max_c = c_float_for_one_quant_level * c_highest;
}
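// Per-channel overload: min_b/max_b carry one filter range per output
// channel, so the qint32 output range is derived independently per channel.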
template <class T1, class T2, class T3>
void MklQuantizationRangeForMultiplication(float min_a, float max_a,
const Tensor& min_b_vector,
const Tensor& max_b_vector,
Tensor** min_c_vector,
Tensor** max_c_vector) {
DCHECK(min_b_vector.NumElements() == (*min_c_vector)->NumElements());
DCHECK(max_b_vector.NumElements() == (*max_c_vector)->NumElements());
size_t n_channel = min_b_vector.NumElements();
const int64 c_highest = static_cast<int64_t>(Eigen::NumTraits<T3>::highest());
const int64 c_lowest = static_cast<int64_t>(Eigen::NumTraits<T3>::lowest());
const float* min_b = min_b_vector.flat<float>().data();
const float* max_b = max_b_vector.flat<float>().data();
float* min_c = (*min_c_vector)->flat<float>().data();
float* max_c = (*max_c_vector)->flat<float>().data();
#ifdef ENABLE_ONEDNN_OPENMP
#pragma omp parallel for
#endif
for (int64_t n = 0; n < n_channel; ++n) {
float a_float_for_one_quant_level =
MklFloatForOneQuantizedLevel<T1>(min_a, max_a);
float b_float_for_one_quant_level =
MklFloatForOneQuantizedLevel<T2>(min_b[n], max_b[n]);
float c_float_for_one_quant_level =
a_float_for_one_quant_level * b_float_for_one_quant_level;
min_c[n] = c_float_for_one_quant_level * c_lowest;
max_c[n] = c_float_for_one_quant_level * c_highest;
}
}
}
#endif
#endif
|
#if defined(INTEL_MKL) && defined(ENABLE_MKL)
#define EIGEN_USE_THREADS
#include <functional>
#include <memory>
#include <vector>
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/mkl/mkl_kernel_util.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session.h"
namespace tensorflow {
class QuantizedConv2DTest : public OpsTestBase {
protected:
template <typename Tinput>
void ConfigureQuantizedConv2D(const bool old_api, const int& stride,
const string& padding,
const std::vector<int> padding_values = {}) {
if (old_api) {
TF_ASSERT_OK(NodeDefBuilder("quantized_conv_op", "_MklQuantizedConv2D")
.Input(FakeInput(DataTypeToEnum<Tinput>::v()))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Tinput", DataTypeToEnum<Tinput>::v())
.Attr("Tfilter", DataTypeToEnum<qint8>::v())
.Attr("out_type", DataTypeToEnum<qint32>::v())
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", padding)
.Attr("padding_list", padding_values)
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_EXPECT_OK(
NodeDefBuilder("quantized_conv_op", "_FusedQuantizedConv2D")
.Attr("Thost_inputs", {DataTypeToEnum<Tinput>::v(), DT_QINT8,
DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("Tinput", DataTypeToEnum<Tinput>::v())
.Attr("Tfilter", DT_QINT8)
.Attr("Tsummand", DT_QINT32)
.Attr("out_type", DT_QINT32)
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", padding)
.Attr("explicit_paddings", padding_values)
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
}
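  // Runs a 3x3 depthwise convolution over a 1x3x2x2 NHWC image; with the
  // fused bias of {1.0, 1.0}, every expected output is exactly one larger
  // than in the bias-free case.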
void RunQuantizedDepthwiseConv2DOp(const bool& bias_enabled) {
const int depth = 2;
const int image_width = 2;
const int image_height = 3;
const int image_batch_count = 1;
AddInputFromArray<quint8>(
TensorShape({image_batch_count, image_height, image_width, depth}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
const int filter_size = 3;
const int filter_count = 1;
AddInputFromArray<qint8>(
TensorShape({filter_size, filter_size, depth, filter_count}),
{1, 2, 7, 8, 13, 14, 3, 4, 9, 10, 15, 16, 5, 6, 11, 12, 17, 18});
if (bias_enabled) {
AddInputFromArray<float>(TensorShape({depth}), {1.0f, 1.0f});
}
AddInputFromArray<float>(TensorShape({}), {0.0f});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(DT_QINT32, TensorShape({image_batch_count, image_height,
image_width, depth}));
if (bias_enabled) {
test::FillValues<qint32>(&expected, {229, 301, 133, 181, 483, 597, 267,
345, 373, 453, 181, 237});
} else {
test::FillValues<qint32>(&expected, {228, 300, 132, 180, 482, 596, 266,
344, 372, 452, 180, 236});
}
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output);
}
void TestSmall(const bool old_api) {
const int stride = 1;
const string padding = "SAME";
ConfigureQuantizedConv2D<quint8>(old_api, stride, padding);
const int depth = 1;
const int image_width = 4;
const int image_height = 3;
const int image_batch_count = 1;
const float image_min = 0.0f;
const float image_max = 255.0f;
Tensor image_float(DT_FLOAT,
{image_batch_count, image_height, image_width, depth});
test::FillValues<float>(&image_float,
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
Tensor image_quantized =
FloatTensorToQuantized<quint8>(image_float, image_min, image_max);
const int filter_size = 3;
const int filter_count = 1;
const float filter_min = -127.0f;
const float filter_max = 127.0f;
Tensor filter_float(DT_FLOAT,
{filter_size, filter_size, depth, filter_count});
test::FillValues<float>(&filter_float, {1, 4, 7, 2, 5, 8, 3, 6, 9});
Tensor filter_quantized =
FloatTensorToQuantized<qint8>(filter_float, filter_min, filter_max);
AddInputFromArray<quint8>(image_quantized.shape(),
image_quantized.flat<quint8>());
AddInputFromArray<qint8>(filter_quantized.shape(),
filter_quantized.flat<qint8>());
AddInputFromArray<float>(TensorShape({}), {image_min});
AddInputFromArray<float>(TensorShape({}), {image_max});
AddInputFromArray<float>(TensorShape({}), {filter_min});
AddInputFromArray<float>(TensorShape({}), {filter_max});
TF_ASSERT_OK(RunOpKernel());
const int expected_width = image_width;
const int expected_height = image_height;
Tensor expected_float(DT_FLOAT,
TensorShape({image_batch_count, expected_height,
expected_width, filter_count}));
test::FillValues<float>(&expected_float, {105, 150, 183, 95, 235, 312, 357,
178, 187, 234, 261, 121});
const Tensor& output = *GetOutput(0);
const float output_min = GetOutput(1)->scalar<float>()();
const float output_max = GetOutput(2)->scalar<float>()();
Tensor output_float =
QuantizedTensorToFloat<qint32>(output, output_min, output_max);
test::ExpectTensorNear<float>(expected_float, output_float, 1.0);
}
void TestSmallS8(const bool old_api) {
const int stride = 1;
const int depth = 1;
const int image_width = 3;
const int image_height = 3;
const int image_batch_count = 1;
const float image_min = -127.0f;
const float image_max = 127.0f;
const string padding = "VALID";
ConfigureQuantizedConv2D<qint8>(old_api, stride, padding);
Tensor image_float(DT_FLOAT,
{image_batch_count, image_height, image_width, depth});
test::FillValues<float>(&image_float, {2, 3, 4, 6, -4, -2, 3, 0, 4});
Tensor image_quantized =
FloatTensorToQuantized<qint8>(image_float, image_min, image_max);
const int filter_size = 3;
const int filter_count = 1;
const float filter_min = -127.0f;
const float filter_max = 127.0f;
Tensor filter_float(DT_FLOAT,
{filter_size, filter_size, depth, filter_count});
test::FillValues<float>(&filter_float, {1, 4, 2, 0, 5, -1, 3, -1, -3});
Tensor filter_quantized =
FloatTensorToQuantized<qint8>(filter_float, filter_min, filter_max);
AddInputFromArray<qint8>(image_quantized.shape(),
image_quantized.flat<qint8>());
AddInputFromArray<qint8>(filter_quantized.shape(),
filter_quantized.flat<qint8>());
AddInputFromArray<float>(TensorShape({}), {image_min});
AddInputFromArray<float>(TensorShape({}), {image_max});
AddInputFromArray<float>(TensorShape({}), {filter_min});
AddInputFromArray<float>(TensorShape({}), {filter_max});
TF_ASSERT_OK(RunOpKernel());
const int expected_width = 1;
const int expected_height = 1;
Tensor expected_float(DT_FLOAT,
TensorShape({image_batch_count, expected_height,
expected_width, filter_count}));
test::FillValues<float>(&expected_float, {1});
const Tensor& output = *GetOutput(0);
const float output_min = GetOutput(1)->scalar<float>()();
const float output_max = GetOutput(2)->scalar<float>()();
Tensor output_float =
QuantizedTensorToFloat<qint32>(output, output_min, output_max);
test::ExpectTensorNear<float>(expected_float, output_float, 1.0);
}
void TestSmall32Bit(const bool old_api) {
const int stride = 1;
const string padding = "SAME";
ConfigureQuantizedConv2D<quint8>(old_api, stride, padding);
const int depth = 1;
const int image_width = 4;
const int image_height = 3;
const int image_batch_count = 1;
AddInputFromArray<quint8>(
TensorShape({image_batch_count, image_height, image_width, depth}),
{10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120});
const int filter_size = 3;
const int filter_count = 1;
AddInputFromArray<qint8>(
TensorShape({filter_size, filter_size, depth, filter_count}),
{10, 40, 70, 20, 50, 80, 30, 60, 90});
AddInputFromArray<float>(TensorShape({}), {0.0f});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel());
const int expected_width = image_width;
const int expected_height = image_height;
Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height,
expected_width, filter_count}));
test::FillValues<qint32>(
&expected, {10500, 15000, 18300, 9500, 23500, 31200, 35700, 17800,
18700, 23400, 26100, 12100});
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output);
}
void TestSmall32BitWithPadding(const bool old_api) {
const int stride = 1;
const string padding = "SAME";
ConfigureQuantizedConv2D<quint8>(old_api, stride, padding);
const int depth = 1;
const int image_width = 4;
const int image_height = 3;
const int image_batch_count = 1;
AddInputFromArray<quint8>(
TensorShape({image_batch_count, image_height, image_width, depth}),
{10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120});
const int filter_size = 3;
const int filter_count = 1;
AddInputFromArray<qint8>(
TensorShape({filter_size, filter_size, depth, filter_count}),
{10, 40, 70, 20, 50, 80, 30, 60, 90});
AddInputFromArray<float>(TensorShape({}), {0.0f});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel());
const int expected_width = image_width;
const int expected_height = image_height;
Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height,
expected_width, filter_count}));
test::FillValues<qint32>(
&expected, {10500, 15000, 18300, 9500, 23500, 31200, 35700, 17800,
18700, 23400, 26100, 12100});
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output);
}
void TestOddPadding(const bool old_api) {
const int stride = 2;
string padding = "SAME";
ConfigureQuantizedConv2D<quint8>(old_api, stride, padding);
const int depth = 1;
const int image_width = 4;
const int image_height = 4;
const int image_batch_count = 1;
AddInputFromArray<quint8>(
TensorShape({image_batch_count, image_height, image_width, depth}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
const int filter_size = 3;
const int filter_count = 1;
AddInputFromArray<qint8>(
TensorShape({filter_size, filter_size, depth, filter_count}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<float>(TensorShape({}), {0.0f});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel());
const int expected_width = image_width / stride;
const int expected_height = image_height / stride;
Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height,
expected_width, filter_count}));
test::FillValues<qint32>(&expected, {348, 252, 274, 175});
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output);
}
void TestOddPaddingBatch(const bool old_api) {
const int stride = 2;
const string padding = "SAME";
ConfigureQuantizedConv2D<quint8>(old_api, stride, padding);
const int depth = 1;
const int image_width = 4;
const int image_height = 4;
const int image_batch_count = 3;
AddInputFromArray<quint8>(
TensorShape({image_batch_count, image_height, image_width, depth}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
const int filter_size = 3;
const int filter_count = 1;
AddInputFromArray<qint8>(
TensorShape({filter_size, filter_size, depth, filter_count}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<float>(TensorShape({}), {0.0f});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel());
const int expected_width = image_width / stride;
const int expected_height = image_height / stride;
Tensor expected(DT_QINT32, TensorShape({image_batch_count, expected_height,
expected_width, filter_count}));
test::FillValues<qint32>(&expected, {348, 252, 274, 175, 348, 252, 274, 175,
348, 252, 274, 175});
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output);
}
void TestDepthwiseConv2D(const bool old_api) {
const int stride = 1;
if (old_api) {
TF_ASSERT_OK(NodeDefBuilder("quantized_depthwise_conv_op",
"_MklQuantizedDepthwiseConv2D")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Tinput", DataTypeToEnum<quint8>::v())
.Attr("Tfilter", DataTypeToEnum<qint8>::v())
.Attr("out_type", DataTypeToEnum<qint32>::v())
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_EXPECT_OK(NodeDefBuilder("quantized_depthwise_conv_op",
"_FusedQuantizedDepthwiseConv2D")
.Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("Tinput", DT_QUINT8)
.Attr("Tfilter", DT_QINT8)
.Attr("Tsummand", DT_QINT32)
.Attr("out_type", DT_QINT32)
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
RunQuantizedDepthwiseConv2DOp(false);
}
void TestDepthwiseConv2DWithBias(const bool old_api) {
const int stride = 1;
if (old_api) {
TF_ASSERT_OK(NodeDefBuilder("quantized_depthwise_conv_op",
"_MklQuantizedDepthwiseConv2DWithBias")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Tinput", DataTypeToEnum<quint8>::v())
.Attr("Tfilter", DataTypeToEnum<qint8>::v())
.Attr("out_type", DataTypeToEnum<qint32>::v())
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_EXPECT_OK(
NodeDefBuilder("quantized_depthwise_conv_op",
"_FusedQuantizedDepthwiseConv2D")
.Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_FLOAT, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("Tinput", DT_QUINT8)
.Attr("Tfilter", DT_QINT8)
.Attr("Tbias", DT_FLOAT)
.Attr("Tsummand", DT_QINT32)
.Attr("out_type", DT_QINT32)
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Attr("fused_ops", {"BiasAdd"})
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
RunQuantizedDepthwiseConv2DOp(true);
}
void TestDepthwiseConv2DWithBiasAndRelu(const bool old_api) {
const int stride = 1;
if (old_api) {
TF_ASSERT_OK(NodeDefBuilder("quantized_depthwise_conv_op",
"_MklQuantizedDepthwiseConv2DWithBiasAndRelu")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Tinput", DataTypeToEnum<quint8>::v())
.Attr("Tfilter", DataTypeToEnum<qint8>::v())
.Attr("out_type", DataTypeToEnum<qint32>::v())
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_EXPECT_OK(
NodeDefBuilder("quantized_depthwise_conv_op",
"_FusedQuantizedDepthwiseConv2D")
.Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_FLOAT, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("Tinput", DT_QUINT8)
.Attr("Tfilter", DT_QINT8)
.Attr("Tbias", DT_FLOAT)
.Attr("Tsummand", DT_QINT32)
.Attr("out_type", DT_QINT32)
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Attr("fused_ops", {"BiasAdd", "Relu"})
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
RunQuantizedDepthwiseConv2DOp(true);
}
};
TEST_F(QuantizedConv2DTest, SmallOldAPI) { TestSmall(true); }
TEST_F(QuantizedConv2DTest, SmallNewAPI) { TestSmall(false); }
TEST_F(QuantizedConv2DTest, SmallS8OldAPI) { TestSmallS8(true); }
TEST_F(QuantizedConv2DTest, SmallS8NewAPI) { TestSmallS8(false); }
TEST_F(QuantizedConv2DTest, Small32BitOldAPI) { TestSmall32Bit(true); }
TEST_F(QuantizedConv2DTest, Small32BitNewAPI) { TestSmall32Bit(false); }
TEST_F(QuantizedConv2DTest, Small32BitWithPaddingOldAPI) {
TestSmall32BitWithPadding(true);
}
TEST_F(QuantizedConv2DTest, Small32BitWithPaddingNewAPI) {
TestSmall32BitWithPadding(false);
}
TEST_F(QuantizedConv2DTest, OddPaddingOldAPI) { TestOddPadding(true); }
TEST_F(QuantizedConv2DTest, OddPaddingNewAPI) { TestOddPadding(false); }
TEST_F(QuantizedConv2DTest, OddPaddingBatchOldAPI) {
TestOddPaddingBatch(true);
}
TEST_F(QuantizedConv2DTest, OddPaddingBatchNewAPI) {
TestOddPaddingBatch(false);
}
TEST_F(QuantizedConv2DTest, DepthwiseConv2DOldAPI) {
TestDepthwiseConv2D(true);
}
TEST_F(QuantizedConv2DTest, DepthwiseConv2DNewAPI) {
TestDepthwiseConv2D(false);
}
TEST_F(QuantizedConv2DTest, DepthwiseConv2DWithBiasOldAPI) {
TestDepthwiseConv2DWithBias(true);
}
TEST_F(QuantizedConv2DTest, DepthwiseConv2DWithBiasNewAPI) {
TestDepthwiseConv2DWithBias(false);
}
TEST_F(QuantizedConv2DTest, DepthwiseConv2DWithBiasAndReluOldAPI) {
TestDepthwiseConv2DWithBiasAndRelu(true);
}
TEST_F(QuantizedConv2DTest, DepthwiseConv2DWithBiasAndReluNewAPI) {
TestDepthwiseConv2DWithBiasAndRelu(false);
}
class QuantizedConvTest : public OpsTestBase {
protected:
template <typename Tinput, typename Tfilter, typename Toutput,
typename Tbias = float, typename Tsummand = float>
void RunQuantizedKernel(Tensor& image_float, Tensor& filter_float,
Tensor& bias_float, Tensor& summand_float,
Tensor& expected_out_float,
const std::vector<string>& fused_ops,
const float tol = 1.0) {
bool fuse_bias = std::find(fused_ops.begin(), fused_ops.end(), "BiasAdd") !=
fused_ops.end();
bool fuse_sum =
std::find(fused_ops.begin(), fused_ops.end(), "Sum") != fused_ops.end();
bool fuse_requantize = std::find(fused_ops.begin(), fused_ops.end(),
"Requantize") != fused_ops.end();
float image_min, image_max;
MklTestingUtil::ComputeMinMax<float>(image_float, &image_min, &image_max);
const float image_max_abs =
std::max(std::abs(image_min), std::abs(image_max));
Tensor image_quantized;
MklTestingUtil::RunMklQuantizeOp(image_float, -image_max_abs, image_max_abs,
DataTypeToEnum<Tinput>::v(), "SCALED",
&image_quantized);
float filter_min, filter_max;
MklTestingUtil::ComputeMinMax<float>(filter_float, &filter_min,
&filter_max);
const float filter_max_abs =
std::max(std::abs(filter_min), std::abs(filter_max));
Tensor filter_quantized;
MklTestingUtil::RunMklQuantizeOp(
filter_float, -filter_max_abs, filter_max_abs,
DataTypeToEnum<Tfilter>::v(), "SCALED", &filter_quantized);
AddInputFromArray<Tinput>(image_quantized.shape(),
image_quantized.flat<Tinput>());
AddInputFromArray<Tfilter>(filter_quantized.shape(),
filter_quantized.flat<Tfilter>());
if (fuse_bias) {
if (std::is_same<Tbias, float>::value) {
AddInputFromArray<Tbias>(bias_float.shape(), bias_float.flat<Tbias>());
} else {
float bias_min, bias_max;
MklTestingUtil::ComputeMinMax<float>(bias_float, &bias_min, &bias_max);
const float bias_max_abs =
std::max(std::abs(bias_min), std::abs(bias_max));
Tensor bias_quantized;
MklTestingUtil::RunMklQuantizeOp(
bias_float, -bias_max_abs, bias_max_abs, DataTypeToEnum<Tbias>::v(),
"SCALED", &bias_quantized);
AddInputFromArray<Tbias>(bias_quantized.shape(),
bias_quantized.flat<Tbias>());
}
}
bool is_quantized_summand = false;
float summand_max_abs = 0;
if (fuse_sum) {
if (std::is_same<Tsummand, float>::value) {
AddInputFromArray<Tsummand>(summand_float.shape(),
summand_float.flat<Tsummand>());
} else {
is_quantized_summand = true;
float summand_min, summand_max;
MklTestingUtil::ComputeMinMax<float>(summand_float, &summand_min,
&summand_max);
summand_max_abs =
std::max(std::abs(summand_min), std::abs(summand_max));
Tensor summand_quantized;
MklTestingUtil::RunMklQuantizeOp(
summand_float, -summand_max_abs, summand_max_abs,
DataTypeToEnum<Tsummand>::v(), "SCALED", &summand_quantized);
AddInputFromArray<Tsummand>(summand_quantized.shape(),
summand_quantized.flat<Tsummand>());
}
}
AddInputFromArray<float>(TensorShape({}), {-image_max_abs});
AddInputFromArray<float>(TensorShape({}), {image_max_abs});
AddInputFromArray<float>(TensorShape({}), {-filter_max_abs});
AddInputFromArray<float>(TensorShape({}), {filter_max_abs});
if (is_quantized_summand) {
AddInputFromArray<float>(TensorShape({}), {-summand_max_abs});
AddInputFromArray<float>(TensorShape({}), {summand_max_abs});
}
if (fuse_requantize) {
float expected_output_min, expected_output_max;
MklTestingUtil::ComputeMinMax<float>(
expected_out_float, &expected_output_min, &expected_output_max);
const float output_max_abs = std::max(std::abs(expected_output_min),
std::abs(expected_output_max));
AddInputFromArray<float>(TensorShape({}), {-output_max_abs});
AddInputFromArray<float>(TensorShape({}), {output_max_abs});
}
TF_ASSERT_OK(RunOpKernel());
const Tensor& output = *GetOutput(0);
const Tensor& output_min = *GetOutput(1);
const Tensor& output_max = *GetOutput(2);
const float output_max_value = output_max.scalar<float>()();
Tensor output_float;
MklTestingUtil::RunDequantizeOp(output, output_min, output_max, "SCALED",
&output_float);
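    // The kernel appears to saturate a signed qint8 summand at the signed
    // maximum when producing quint8 output, so clamp the float reference to
    // 127/255 of the output range before comparing (an interpretation of the
    // observed behavior, not documented semantics).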
if (std::is_same<Tsummand, qint8>::value &&
std::is_same<Toutput, quint8>::value) {
for (int i = 0; i < expected_out_float.NumElements(); i++) {
float* expected_data =
const_cast<float*>(expected_out_float.flat<float>().data());
expected_data[i] =
std::min(expected_data[i], output_max_value * 127.0f / 255.0f);
}
}
test::ExpectTensorNear<float>(expected_out_float, output_float, tol);
}
void RunFloatConv(const Tensor& input_data, const Tensor& filter_data,
const Tensor& bias_data, const Tensor& summand_data,
Tensor* output, const bool is_depthwise,
const std::vector<string>& fused_ops, const string padding,
const int stride) {
auto root = tensorflow::Scope::NewRootScope();
auto input_data_op =
ops::Const(root.WithOpName("input"), Input::Initializer(input_data));
Output out_op;
if (is_depthwise) {
out_op = ops::DepthwiseConv2dNative(
root.WithOpName("conv"), input_data_op,
ops::Const(root.WithOpName("filter"),
Input::Initializer(filter_data)),
{1, stride, stride, 1}, padding);
} else {
out_op = ops::Conv2D(root.WithOpName("conv"), input_data_op,
ops::Const(root.WithOpName("filter"),
Input::Initializer(filter_data)),
{1, stride, stride, 1}, padding);
}
string last_op = "";
for (int i = 0; i < fused_ops.size(); ++i) {
if (fused_ops[i] == "BiasAdd") {
last_op = "with_bias";
out_op = ops::BiasAdd(
root.WithOpName(last_op), out_op,
ops::Const(root.WithOpName("bias"), Input::Initializer(bias_data)));
}
if (fused_ops[i] == "Sum") {
last_op = "with_sum";
out_op = ops::AddV2(root.WithOpName(last_op), out_op,
ops::Const(root.WithOpName("summand"),
Input::Initializer(summand_data)));
}
if (fused_ops[i] == "Relu") {
last_op = "with_relu";
out_op = ops::Relu(root.WithOpName(last_op), out_op);
}
}
tensorflow::GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
MklTestingUtil::RunGraph(graph_def, last_op, output);
}
template <typename Tinput, typename Toutput>
void TestBiasAddFusion(bool fuse_requantize, const bool is_depthwise,
string activation = "", const float tol = 1.0) {
const int stride = 1;
const string padding = "VALID";
std::vector<string> fused_ops = {"BiasAdd"};
std::map<string, DataType> data_types = {
{"Tinput", DataTypeToEnum<Tinput>::v()},
{"Tfilter", DT_QINT8},
{"Tbias", DT_FLOAT},
{"Tsummand", DataTypeToEnum<Toutput>::v()},
{"out_type", DataTypeToEnum<Toutput>::v()}};
std::vector<DataType> input_types = {data_types["Tinput"],
data_types["Tfilter"],
data_types["Tbias"],
DT_FLOAT,
DT_FLOAT,
DT_FLOAT,
DT_FLOAT};
if (!activation.empty()) {
fused_ops.push_back(activation);
}
if (fuse_requantize) {
fused_ops.push_back("Requantize");
input_types.push_back(DT_FLOAT);
input_types.push_back(DT_FLOAT);
}
TF_EXPECT_OK(
NodeDefBuilder("quantized_conv_op",
is_depthwise ? "_FusedQuantizedDepthwiseConv2D"
: "_FusedQuantizedConv2D")
.Attr("Thost_inputs", input_types)
.Attr("Thost_outputs", {data_types["out_type"], DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("Tinput", data_types["Tinput"])
.Attr("Tfilter", data_types["Tfilter"])
.Attr("Tbias", data_types["Tbias"])
.Attr("Tsummand", data_types["Tsummand"])
.Attr("out_type", data_types["out_type"])
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", padding)
.Attr("fused_ops", fused_ops)
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const int image_batch = 1;
const int image_height = 6;
const int image_width = 6;
const int channels = 2;
const int filter_height = 2;
const int filter_width = 2;
const int filter_out_channels = 2;
Tensor image_float(DT_FLOAT,
{image_batch, image_height, image_width, channels});
test::FillValues<float>(
&image_float, {4, 3, 1, 0, 4, 6, 3, 1, 2, 1, 0, 2, 6, 2, 1, 3, 1, 3,
6, 1, 2, 5, 3, 2, 3, 4, 1, 4, 0, 3, 3, 1, 2, 0, 1, 1,
3, 3, 1, 0, 2, 1, 4, 3, 3, 2, 1, 4, 1, 0, 2, 2, 5, 0,
3, 3, 3, 1, 0, 2, 2, 1, 3, 2, 6, 3, 4, 6, 0, 1, 3, 5});
Tensor filter_float(
DT_FLOAT, {filter_height, filter_width, channels, filter_out_channels});
test::FillValues<float>(
&filter_float, {-2, -3, 0, 3, 1, -1, 4, 2, -3, -2, -4, 0, 4, 3, 1, 2});
Tensor bias_float(DT_FLOAT, {is_depthwise ? channels * filter_out_channels
: filter_out_channels});
if (is_depthwise) {
test::FillValues<float>(&bias_float, {1, 2, 1, 2});
} else {
test::FillValues<float>(&bias_float, {1, 2});
}
Tensor expected_float, dummy_summand;
RunFloatConv(image_float, filter_float, bias_float, dummy_summand,
&expected_float, is_depthwise, fused_ops, padding, stride);
RunQuantizedKernel<Tinput, qint8, Toutput, float>(
image_float, filter_float, bias_float, dummy_summand, expected_float,
fused_ops, tol);
}
template <typename Tsummand, typename Toutput>
void TestBiasAddSumActivationFusion(string activation = "") {
const int stride = 1;
const string padding = "VALID";
std::vector<string> fused_ops = {"BiasAdd", "Sum"};
std::map<string, DataType> data_types = {
{"Tinput", DT_QINT8},
{"Tfilter", DT_QINT8},
{"Tbias", DT_FLOAT},
{"Tsummand", DataTypeToEnum<Tsummand>::v()},
{"out_type", DataTypeToEnum<Toutput>::v()}};
std::vector<DataType> input_types = {data_types["Tinput"],
data_types["Tfilter"],
data_types["Tbias"],
data_types["Tsummand"],
DT_FLOAT,
DT_FLOAT,
DT_FLOAT,
DT_FLOAT};
if (std::is_same<Tsummand, quint8>::value ||
std::is_same<Tsummand, qint8>::value) {
input_types.push_back(DT_FLOAT);
input_types.push_back(DT_FLOAT);
}
if (!activation.empty()) {
fused_ops.push_back(activation);
}
if (std::is_same<Toutput, qint8>::value ||
std::is_same<Toutput, quint8>::value) {
fused_ops.push_back("Requantize");
input_types.push_back(DT_FLOAT);
input_types.push_back(DT_FLOAT);
}
TF_EXPECT_OK(
NodeDefBuilder("quantized_conv_op", "_FusedQuantizedConv2D")
.Attr("Thost_inputs", input_types)
.Attr("Thost_outputs", {data_types["out_type"], DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("Tinput", data_types["Tinput"])
.Attr("Tfilter", data_types["Tfilter"])
.Attr("Tbias", data_types["Tbias"])
.Attr("Tsummand", data_types["Tsummand"])
.Attr("out_type", data_types["out_type"])
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", padding)
.Attr("fused_ops", fused_ops)
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const int image_batch = 1;
const int image_height = 4;
const int image_width = 4;
const int channels = 2;
const int filter_height = 2;
const int filter_width = 2;
const int filter_out_channels = 2;
Tensor image_float(DT_FLOAT,
{image_batch, image_height, image_width, channels});
test::FillValues<float>(&image_float,
{2, 4, 5, 6, 1, 2, 3, 0, 1, 1, 6, 2, 6, 2, 4, 1,
3, 4, 3, 1, 1, 4, 0, 7, 3, 1, 5, 0, 2, 1, 3, 3});
Tensor filter_float(
DT_FLOAT, {filter_height, filter_width, channels, filter_out_channels});
test::FillValues<float>(
&filter_float, {1, -3, 0, 2, 3, -4, 0, 5, 2, 1, -1, -2, -5, 3, 4, 0});
Tensor bias_float(DT_FLOAT, {filter_out_channels});
test::FillValues<float>(&bias_float, {2, 4});
Tensor summand_float(DT_FLOAT, {1, 3, 3, 2});
test::FillValues<float>(
&summand_float, {1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9});
Tensor expected_float;
RunFloatConv(image_float, filter_float, bias_float, summand_float,
&expected_float, false, fused_ops, padding,
stride);
RunQuantizedKernel<qint8, qint8, Toutput, float, Tsummand>(
image_float, filter_float, bias_float, summand_float, expected_float,
fused_ops);
}
};
TEST_F(QuantizedConvTest, BiasAddFusion) {
TestBiasAddFusion<qint8, qint32>(false, false);
}
TEST_F(QuantizedConvTest, BiasAddRequantizeFusion) {
TestBiasAddFusion<qint8, qint8>(true, false);
}
TEST_F(QuantizedConvTest, BiasAddReluRequantizeFusion) {
TestBiasAddFusion<qint8, qint8>(true, false, "Relu");
}
TEST_F(QuantizedConvTest, UnsignedInputBiasAddReluRequantizeFusion) {
TestBiasAddFusion<quint8, quint8>(true, false, "Relu", 4.0);
}
TEST_F(QuantizedConvTest, DWBiasAddFusion) {
TestBiasAddFusion<qint8, qint32>(false, true);
}
TEST_F(QuantizedConvTest, DWBiasAddRequantizeFusion) {
TestBiasAddFusion<qint8, qint8>(true, true);
}
TEST_F(QuantizedConvTest, DWBiasAddReluRequantizeFusion) {
TestBiasAddFusion<qint8, qint8>(true, true, "Relu");
}
TEST_F(QuantizedConvTest, DWUnsignedInputBiasAddReluRequantizeFusion) {
TestBiasAddFusion<quint8, quint8>(true, true, "Relu", 4.0);
}
TEST_F(QuantizedConvTest, BiasAddSumReluRequantizeFusion) {
TestBiasAddSumActivationFusion<quint8, quint8>("Relu");
}
TEST_F(QuantizedConvTest, BiasAddSumReluRequantizeFusionSignedSummand) {
TestBiasAddSumActivationFusion<qint8, quint8>("Relu");
}
TEST_F(QuantizedConvTest, BiasAddSumReluFusionFloatSummand) {
TestBiasAddSumActivationFusion<float, qint32>("Relu");
}
}
#endif
| https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_quantized_conv_ops.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_quantized_conv_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ddb4901f-a53e-485f-a2f2-dea58d05ec89 | cpp | google/quiche | p256_key_exchange | quiche/quic/core/crypto/p256_key_exchange.cc | quiche/quic/core/crypto/p256_key_exchange_test.cc |
#include "quiche/quic/core/crypto/p256_key_exchange.h"
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#include "openssl/ec.h"
#include "openssl/ecdh.h"
#include "openssl/err.h"
#include "openssl/evp.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
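// Holds a P-256 private key together with the cached uncompressed public
// point (0x04 || X || Y, 65 bytes) that is handed out via public_value().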
P256KeyExchange::P256KeyExchange(bssl::UniquePtr<EC_KEY> private_key,
const uint8_t* public_key)
: private_key_(std::move(private_key)) {
memcpy(public_key_, public_key, sizeof(public_key_));
}
P256KeyExchange::~P256KeyExchange() {}
std::unique_ptr<P256KeyExchange> P256KeyExchange::New() {
return New(P256KeyExchange::NewPrivateKey());
}
std::unique_ptr<P256KeyExchange> P256KeyExchange::New(absl::string_view key) {
if (key.empty()) {
QUIC_DLOG(INFO) << "Private key is empty";
return nullptr;
}
const uint8_t* keyp = reinterpret_cast<const uint8_t*>(key.data());
bssl::UniquePtr<EC_KEY> private_key(
d2i_ECPrivateKey(nullptr, &keyp, key.size()));
if (!private_key.get() || !EC_KEY_check_key(private_key.get())) {
QUIC_DLOG(INFO) << "Private key is invalid.";
return nullptr;
}
uint8_t public_key[kUncompressedP256PointBytes];
if (EC_POINT_point2oct(EC_KEY_get0_group(private_key.get()),
EC_KEY_get0_public_key(private_key.get()),
POINT_CONVERSION_UNCOMPRESSED, public_key,
sizeof(public_key), nullptr) != sizeof(public_key)) {
QUIC_DLOG(INFO) << "Can't get public key.";
return nullptr;
}
return absl::WrapUnique(
new P256KeyExchange(std::move(private_key), public_key));
}
std::string P256KeyExchange::NewPrivateKey() {
bssl::UniquePtr<EC_KEY> key(EC_KEY_new_by_curve_name(NID_X9_62_prime256v1));
if (!key.get() || !EC_KEY_generate_key(key.get())) {
QUIC_DLOG(INFO) << "Can't generate a new private key.";
return std::string();
}
int key_len = i2d_ECPrivateKey(key.get(), nullptr);
if (key_len <= 0) {
QUIC_DLOG(INFO) << "Can't convert private key to string";
return std::string();
}
std::unique_ptr<uint8_t[]> private_key(new uint8_t[key_len]);
uint8_t* keyp = private_key.get();
if (!i2d_ECPrivateKey(key.get(), &keyp)) {
QUIC_DLOG(INFO) << "Can't convert private key to string.";
return std::string();
}
return std::string(reinterpret_cast<char*>(private_key.get()), key_len);
}
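// ECDH: decode the peer's uncompressed point onto the curve, then derive the
// 32-byte shared secret (the x-coordinate of private_key * peer_point).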
bool P256KeyExchange::CalculateSharedKeySync(
absl::string_view peer_public_value, std::string* shared_key) const {
if (peer_public_value.size() != kUncompressedP256PointBytes) {
QUIC_DLOG(INFO) << "Peer public value is invalid";
return false;
}
bssl::UniquePtr<EC_POINT> point(
EC_POINT_new(EC_KEY_get0_group(private_key_.get())));
if (!point.get() ||
!EC_POINT_oct2point(
EC_KEY_get0_group(private_key_.get()), point.get(),
reinterpret_cast<const uint8_t*>(
peer_public_value.data()),
peer_public_value.size(), nullptr)) {
QUIC_DLOG(INFO) << "Can't convert peer public value to curve point.";
return false;
}
uint8_t result[kP256FieldBytes];
if (ECDH_compute_key(result, sizeof(result), point.get(), private_key_.get(),
nullptr) != sizeof(result)) {
QUIC_DLOG(INFO) << "Can't compute ECDH shared key.";
return false;
}
shared_key->assign(reinterpret_cast<char*>(result), sizeof(result));
return true;
}
absl::string_view P256KeyExchange::public_value() const {
return absl::string_view(reinterpret_cast<const char*>(public_key_),
sizeof(public_key_));
}
}
|
#include "quiche/quic/core/crypto/p256_key_exchange.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
class P256KeyExchangeTest : public QuicTest {
public:
class TestCallbackResult {
public:
void set_ok(bool ok) { ok_ = ok; }
bool ok() { return ok_; }
private:
bool ok_ = false;
};
class TestCallback : public AsynchronousKeyExchange::Callback {
public:
TestCallback(TestCallbackResult* result) : result_(result) {}
virtual ~TestCallback() = default;
void Run(bool ok) { result_->set_ok(ok); }
private:
TestCallbackResult* result_;
};
};
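// ECDH is symmetric: the secrets Alice and Bob derive independently from each
// other's public values must agree.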
TEST_F(P256KeyExchangeTest, SharedKey) {
for (int i = 0; i < 5; i++) {
std::string alice_private(P256KeyExchange::NewPrivateKey());
std::string bob_private(P256KeyExchange::NewPrivateKey());
ASSERT_FALSE(alice_private.empty());
ASSERT_FALSE(bob_private.empty());
ASSERT_NE(alice_private, bob_private);
std::unique_ptr<P256KeyExchange> alice(P256KeyExchange::New(alice_private));
std::unique_ptr<P256KeyExchange> bob(P256KeyExchange::New(bob_private));
ASSERT_TRUE(alice != nullptr);
ASSERT_TRUE(bob != nullptr);
const absl::string_view alice_public(alice->public_value());
const absl::string_view bob_public(bob->public_value());
std::string alice_shared, bob_shared;
ASSERT_TRUE(alice->CalculateSharedKeySync(bob_public, &alice_shared));
ASSERT_TRUE(bob->CalculateSharedKeySync(alice_public, &bob_shared));
ASSERT_EQ(alice_shared, bob_shared);
}
}
TEST_F(P256KeyExchangeTest, AsyncSharedKey) {
for (int i = 0; i < 5; i++) {
std::string alice_private(P256KeyExchange::NewPrivateKey());
std::string bob_private(P256KeyExchange::NewPrivateKey());
ASSERT_FALSE(alice_private.empty());
ASSERT_FALSE(bob_private.empty());
ASSERT_NE(alice_private, bob_private);
std::unique_ptr<P256KeyExchange> alice(P256KeyExchange::New(alice_private));
std::unique_ptr<P256KeyExchange> bob(P256KeyExchange::New(bob_private));
ASSERT_TRUE(alice != nullptr);
ASSERT_TRUE(bob != nullptr);
const absl::string_view alice_public(alice->public_value());
const absl::string_view bob_public(bob->public_value());
std::string alice_shared, bob_shared;
TestCallbackResult alice_result;
ASSERT_FALSE(alice_result.ok());
alice->CalculateSharedKeyAsync(
bob_public, &alice_shared,
std::make_unique<TestCallback>(&alice_result));
ASSERT_TRUE(alice_result.ok());
TestCallbackResult bob_result;
ASSERT_FALSE(bob_result.ok());
bob->CalculateSharedKeyAsync(alice_public, &bob_shared,
std::make_unique<TestCallback>(&bob_result));
ASSERT_TRUE(bob_result.ok());
ASSERT_EQ(alice_shared, bob_shared);
ASSERT_NE(0u, alice_shared.length());
ASSERT_NE(0u, bob_shared.length());
}
}
}
}
| https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/p256_key_exchange.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/p256_key_exchange_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
c6347815-cf44-490a-9deb-925367e87a81 | cpp | tensorflow/tensorflow | value_range | third_party/xla/xla/service/value_range.cc | third_party/xla/xla/service/value_range_test.cc |
#include "xla/service/value_range.h"
#include <optional>
#include <string>
#include "xla/hlo/ir/hlo_instruction.h"
namespace xla {
std::optional<int64_t> Range::GetSingleSignedValue() const {
if (!IsSingleValue()) {
return std::nullopt;
}
return min_.GetSignedValue();
}
std::optional<int64_t> Range::GetSingleUnsignedValue() const {
if (!IsSingleValue()) {
return std::nullopt;
}
return min_.GetUnsignedValue();
}
std::string Range::ToString() const {
if (IsEmpty()) {
return std::string("Empty");
}
return absl::StrCat("min: ", min_.ToString(), " max: ", max_.ToString());
}
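// Recursively walks the defining instructions of a scalar integer (or PRED)
// value, seeding from predefined_ranges, and returns a conservative
// [min, max] bound; an empty Range means the value could not be bounded.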
Range RecursivelyIdentifyRange(
const HloInstruction* instr,
const absl::flat_hash_map<const HloInstruction*, Range>&
predefined_ranges) {
if ((!instr->shape().IsInteger() && instr->shape().element_type() != PRED) ||
instr->shape().dimensions_size() != 0) {
return Range{};
}
VLOG(5) << "Computing Range for " << instr->ToString();
auto it = predefined_ranges.find(instr);
if (it != predefined_ranges.end()) {
VLOG(5) << "Found range! " << it->second.max().GetSignedValue() << " "
<< it->second.min().GetSignedValue();
return it->second;
}
switch (instr->opcode()) {
case HloOpcode::kCompare: {
VLOG(5) << "Handling Compare";
Range lhs =
RecursivelyIdentifyRange(instr->operand(0), predefined_ranges);
Range rhs =
RecursivelyIdentifyRange(instr->operand(1), predefined_ranges);
VLOG(5) << "Returned Rhs: " << rhs.ToString()
<< " Lhs: " << lhs.ToString();
if (instr->comparison_direction() != ComparisonDirection::kLt) {
return Range{};
}
if (lhs.max().lt(rhs.min())) {
return Range{ConstantValue::GetOne(1, false),
ConstantValue::GetOne(1, false),
true};
}
if (!lhs.min().lt(rhs.max())) {
return Range{
ConstantValue::GetZero(1, false),
ConstantValue::GetZero(1, false),
true};
}
VLOG(5) << "Compare failed";
VLOG(5) << "rhs max " << rhs.max().GetSignedValue() << " rhs min "
<< rhs.min().GetSignedValue() << " lhs max "
<< lhs.max().GetSignedValue() << " lhs min "
<< lhs.min().GetSignedValue();
return Range{};
}
case HloOpcode::kConstant: {
if (!instr->shape().IsInteger()) {
return Range{};
}
VLOG(5) << "Handling Constant";
const int64_t bitwidth =
primitive_util::BitWidth(instr->shape().element_type());
const bool is_signed =
primitive_util::IsSignedIntegralType(instr->shape().element_type());
if (is_signed) {
const int64_t value = *instr->literal().GetFirstInteger();
return Range{ConstantValue::GetSigned(value, bitwidth),
ConstantValue::GetSigned(value, bitwidth),
true};
}
const uint64_t value = *instr->literal().GetFirstInteger();
return Range{ConstantValue::GetUnsigned(value, bitwidth),
ConstantValue::GetUnsigned(value, bitwidth),
true};
}
case HloOpcode::kAdd: {
if (!instr->shape().IsInteger()) {
return Range{};
}
VLOG(5) << "Handling Add";
Range lhs =
RecursivelyIdentifyRange(instr->operand(0), predefined_ranges);
Range rhs =
RecursivelyIdentifyRange(instr->operand(1), predefined_ranges);
VLOG(5) << "Returned Rhs: " << rhs.ToString()
<< " Lhs: " << lhs.ToString();
if (lhs.IsEmpty() || rhs.IsEmpty()) {
return Range{};
}
ConstantValue min = lhs.min().add(rhs.min());
ConstantValue max = lhs.max().add(rhs.max());
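      // Fixed-width addition can wrap; a computed max below min signals
      // overflow, so no sound range exists.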
if (max.lt(min)) {
VLOG(5) << "Add wrapped";
return Range{};
}
return Range{min, max, lhs.IsLinear() && rhs.IsLinear()};
}
case HloOpcode::kSelect: {
VLOG(5) << "Handling Select";
const HloInstruction* cmp = instr->operand(0);
Range cmp_range = RecursivelyIdentifyRange(cmp, predefined_ranges);
if (cmp_range.IsEmpty() || !cmp_range.IsSingleValue()) {
VLOG(5) << "Select failed";
return Range{};
}
if (cmp_range.GetSingleSignedValue() == 0) {
return RecursivelyIdentifyRange(instr->operand(2), predefined_ranges);
}
return RecursivelyIdentifyRange(instr->operand(1), predefined_ranges);
}
case HloOpcode::kSubtract: {
if (!instr->shape().IsInteger()) {
return Range{};
}
VLOG(5) << "Handling Subtract";
Range lhs =
RecursivelyIdentifyRange(instr->operand(0), predefined_ranges);
Range rhs =
RecursivelyIdentifyRange(instr->operand(1), predefined_ranges);
VLOG(5) << "Returned Rhs: " << rhs.ToString()
<< " Lhs: " << lhs.ToString();
if (lhs.IsEmpty() || rhs.IsEmpty()) {
return Range{};
}
ConstantValue min = lhs.min().sub(rhs.max());
ConstantValue max = lhs.max().sub(rhs.min());
if (max.lt(min)) {
VLOG(5) << "Subtract wrapped";
return Range{};
}
return Range{min, max, lhs.IsLinear() && rhs.IsLinear()};
}
default:
break;
}
VLOG(5) << "Unsupported instruction: " << instr->ToString();
return Range{};
}
} | #include "xla/service/value_range.h"
#include <utility>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class ValueRangeTest : public HloTestBase {};
TEST_F(ValueRangeTest, AddedValue) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = s32[] constant(124)
p0 = s32[] parameter(0)
ROOT %a = s32[] add(p0, c0)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(0);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(std::make_pair(
p0, Range{ConstantValue::GetZero(32, true),
ConstantValue::GetSigned(5, 32), true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_FALSE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_TRUE(range.IsLinear());
EXPECT_EQ(range.min().GetSignedValue(), 124);
EXPECT_EQ(range.max().GetSignedValue(), 129);
}
TEST_F(ValueRangeTest, AddedValueUnsigned) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = u16[] constant(32768)
p0 = u16[] parameter(0)
ROOT %a = u16[] add(p0, c0)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(0);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(std::make_pair(
p0, Range{ConstantValue::GetZero(32, false),
ConstantValue::GetUnsigned(5, 32), true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_FALSE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_TRUE(range.IsLinear());
EXPECT_EQ(range.min().GetUnsignedValue(), 32768);
EXPECT_EQ(range.max().GetUnsignedValue(), 32773);
}
TEST_F(ValueRangeTest, SubtractValue) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = s32[] constant(124)
p0 = s32[] parameter(0)
ROOT %a = s32[] subtract(p0, c0)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(0);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(std::make_pair(
p0, Range{ConstantValue::GetZero(32, true),
ConstantValue::GetSigned(5, 32), true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_FALSE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_TRUE(range.IsLinear());
EXPECT_EQ(range.min().GetSignedValue(), -124);
EXPECT_EQ(range.max().GetSignedValue(), -119);
}
TEST_F(ValueRangeTest, SelectValue) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = s32[] constant(124)
p0 = s32[] parameter(0)
c = pred[] compare(p0, c0), direction=LT
%s = s32[] subtract(p0, c0)
%a = s32[] add(c0, p0)
ROOT slct = s32[] select(c, s, a)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(0)->operand(0);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(std::make_pair(
p0, Range{ConstantValue::GetZero(32, true),
ConstantValue::GetSigned(5, 32), true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_FALSE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_TRUE(range.IsLinear());
EXPECT_EQ(range.max().GetSignedValue(), -119);
EXPECT_EQ(range.min().GetSignedValue(), -124);
}
TEST_F(ValueRangeTest, SelectValue2) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = s32[] constant(124)
p0 = s32[] parameter(0)
c = pred[] compare(c0, p0), direction=LT
%s = s32[] subtract(p0, c0)
%a = s32[] add(c0, p0)
ROOT slct = s32[] select(c, s, a)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(0)->operand(1);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(std::make_pair(
p0, Range{ConstantValue::GetZero(32, true),
ConstantValue::GetSigned(5, 32), true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_FALSE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_TRUE(range.IsLinear());
EXPECT_EQ(range.max().GetSignedValue(), 129);
EXPECT_EQ(range.min().GetSignedValue(), 124);
}
TEST_F(ValueRangeTest, AddSubtractValue) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = s32[] constant(124)
c1 = s32[] constant(12)
c2 = s32[] constant(5)
p0 = s32[] parameter(0)
sub = s32[] subtract(p0, c0)
a = s32[] add(sub, c1)
sub2 = s32[] subtract(c2, a)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(1)->operand(0)->operand(0);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(std::make_pair(
p0, Range{ConstantValue::GetZero(32, true),
ConstantValue::GetSigned(5, 32), true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_FALSE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_TRUE(range.IsLinear());
EXPECT_EQ(range.min().GetSignedValue(), 112);
EXPECT_EQ(range.max().GetSignedValue(), 117);
}
TEST_F(ValueRangeTest, SubtractWrapAroundValue) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = s16[] constant(124)
p0 = s16[] parameter(0)
ROOT %a = s16[] subtract(p0, c0)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(0);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(
std::make_pair(p0, Range{ConstantValue::GetSigned(-32768, 16),
ConstantValue::GetZero(16, true),
true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_TRUE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_FALSE(range.IsLinear());
}
TEST_F(ValueRangeTest, AddWrapAroundValue) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = s16[] constant(124)
p0 = s16[] parameter(0)
ROOT %a = s16[] add(p0, c0)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(0);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(
std::make_pair(p0, Range{ConstantValue::GetZero(16, true),
ConstantValue::GetSigned(32760, 16),
true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_TRUE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_FALSE(range.IsLinear());
}
}
}
| https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/value_range.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/value_range_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ed0939e5-4c7d-4e30-8840-90284cbc9a53 | cpp | tensorflow/tensorflow | graph | tensorflow/core/graph/graph.cc | tensorflow/core/graph/graph_test.cc |
#include "tensorflow/core/graph/graph.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_properties.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph_debug_info_builder.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/graph/while_context.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
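// Maps an op type string to its NodeClass. REF_CLASS registers both the op
// and its reference-output twin (e.g. "Switch" and "RefSwitch") under one
// class; unrecognized ops fall through to NC_OTHER.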
Node::NodeClass Node::GetNodeClassForOp(const std::string& ts) {
static const absl::flat_hash_map<std::string, Node::NodeClass>*
kNodeClassTable =
#define REF_CLASS(key, value) \
{key, value}, { "Ref" key, value }
new absl::flat_hash_map<std::string, Node::NodeClass>({
REF_CLASS("Switch", NC_SWITCH),
REF_CLASS("_SwitchN", NC_SWITCH),
REF_CLASS("Merge", NC_MERGE),
REF_CLASS("Enter", NC_ENTER),
REF_CLASS("Exit", NC_EXIT),
REF_CLASS("NextIteration", NC_NEXT_ITERATION),
{"LoopCond", NC_LOOP_COND},
{"ControlTrigger", NC_CONTROL_TRIGGER},
{"_Send", NC_SEND},
{"_HostSend", NC_HOST_SEND},
{"_Recv", NC_RECV},
{"_HostRecv", NC_HOST_RECV},
{"Const", NC_CONSTANT},
{"HostConst", NC_CONSTANT},
{"Variable", NC_VARIABLE},
{"VariableV2", NC_VARIABLE},
REF_CLASS("Identity", NC_IDENTITY),
{"GetSessionHandle", NC_GET_SESSION_HANDLE},
{"GetSessionHandleV2", NC_GET_SESSION_HANDLE},
{"GetSessionTensor", NC_GET_SESSION_TENSOR},
{"DeleteSessionTensor", NC_DELETE_SESSION_TENSOR},
{"Size", NC_METADATA},
{"Shape", NC_METADATA},
{"Rank", NC_METADATA},
{"_ScopedAllocator", NC_SCOPED_ALLOCATOR},
{"CollectiveReduce", NC_COLLECTIVE},
{"CollectiveBcastSend", NC_COLLECTIVE},
{"CollectiveBcastRecv", NC_COLLECTIVE},
{"CollectiveGather", NC_COLLECTIVE},
{"FakeParam", NC_FAKE_PARAM},
{"PartitionedCall", NC_PARTITIONED_CALL},
{"StatefulPartitionedCall", NC_PARTITIONED_CALL},
{"SymbolicGradient", NC_SYMBOLIC_GRADIENT},
{"If", NC_IF},
{"StatelessIf", NC_IF},
{"While", NC_WHILE},
{"StatelessWhile", NC_WHILE},
{"Case", NC_CASE},
{"StatelessCase", NC_CASE},
{"_Arg", NC_ARG},
{"_DeviceArg", NC_ARG},
{"_Retval", NC_RETVAL},
{"_DeviceRetval", NC_RETVAL},
{"_XlaMerge", NC_MERGE},
});
#undef REF_CLASS
auto it = kNodeClassTable->find(ts);
if (it != kNodeClassTable->end()) {
return it->second;
} else {
return NC_OTHER;
}
}
std::string Node::DebugString() const {
std::string ret = strings::StrCat("{name:'", name(), "' id:", id_);
if (IsSource()) {
strings::StrAppend(&ret, " source}");
} else if (IsSink()) {
strings::StrAppend(&ret, " sink}");
} else {
strings::StrAppend(&ret, " op device:", "{requested: '", requested_device(),
"', assigned: '", assigned_device_name(), "'}", " def:{",
SummarizeNode(*this), "}}");
}
return ret;
}
Node::Node()
: id_(-1),
cost_id_(-1),
class_(NC_UNINITIALIZED),
props_(nullptr),
assigned_device_name_index_(0),
while_ctx_(nullptr) {}
void Node::Initialize(int id, int cost_id,
std::shared_ptr<NodeProperties> props,
Node::NodeClass node_class) {
DCHECK_EQ(id_, -1);
DCHECK(in_edges_.empty());
DCHECK(out_edges_.empty());
id_ = id;
cost_id_ = cost_id;
props_ = std::move(props);
class_ = node_class;
}
void Node::Clear() {
in_edges_.clear();
out_edges_.clear();
id_ = -1;
cost_id_ = -1;
class_ = NC_UNINITIALIZED;
props_.reset();
assigned_device_name_index_ = 0;
}
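// Recomputes the cached input/output type vectors from the NodeDef and OpDef.
// Mutates props_ in place when uniquely owned; otherwise allocates a fresh
// NodeProperties so nodes still sharing the old properties are unaffected.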
void Node::UpdateProperties() {
DataTypeVector inputs;
DataTypeVector outputs;
Status status =
InOutTypesForNode(props_->node_def, *(props_->op_def), &inputs, &outputs);
if (!status.ok()) {
LOG(ERROR) << "Failed at updating node: " << status;
return;
}
if (props_->input_types != inputs || props_->output_types != outputs) {
if (TF_PREDICT_TRUE(props_.use_count() == 1)) {
props_->input_types = inputs;
props_->input_types_slice = props_->input_types;
props_->output_types = outputs;
props_->output_types_slice = props_->output_types;
} else {
props_ = std::make_shared<NodeProperties>(
props_->op_def, std::move(props_->node_def), inputs, outputs);
}
}
}
void Node::ClearTypeInfo() {
if (props_->node_def.has_experimental_type()) {
MaybeCopyOnWrite();
props_->node_def.clear_experimental_type();
}
}
Status Node::ShrinkTypeInfo(const absl::flat_hash_map<int, int>& index_mapping,
const string& type_attr_name,
bool update_full_type) {
std::vector<DataType> dtypes;
TF_RETURN_IF_ERROR(GetNodeAttr(def(), type_attr_name, &dtypes));
std::vector<DataType> new_dtypes;
new_dtypes.reserve(index_mapping.size());
for (int i = 0; i < dtypes.size(); ++i) {
if (index_mapping.contains(i)) {
new_dtypes.emplace_back(dtypes[i]);
}
}
ClearAttr(type_attr_name);
AddAttr(type_attr_name, new_dtypes);
if (!update_full_type || !def().has_experimental_type()) {
return absl::OkStatus();
}
FullTypeDef ft = def().experimental_type();
if (ft.type_id() != TFT_PRODUCT) {
return errors::Internal(
"In ShrinkTypeInfo, full type information does not start with "
"TFT_PRODUCT\n",
ft.DebugString());
}
if (ft.args_size() != dtypes.size()) {
return errors::Internal("In ShrinkTypeInfo, ft.args_size() ",
ft.args_size(), " != dtypes.size() ",
dtypes.size());
}
FullTypeDef new_ft;
new_ft.set_type_id(TFT_PRODUCT);
for (int i = 0; i < ft.args_size(); ++i) {
if (index_mapping.contains(i)) {
(*new_ft.add_args()) = ft.args(i);
}
}
MaybeCopyOnWrite();
*(mutable_def()->mutable_experimental_type()) = new_ft;
return absl::OkStatus();
}
const std::string& Node::name() const { return props_->node_def.name(); }
const std::string& Node::type_string() const { return props_->node_def.op(); }
const NodeDef& Node::def() const { return props_->node_def; }
const OpDef& Node::op_def() const { return *props_->op_def; }
NodeDef* Node::mutable_def() { return &props_->node_def; }
int32 Node::num_inputs() const { return props_->input_types.size(); }
DataType Node::input_type(int32_t i) const { return props_->input_types[i]; }
const DataTypeVector& Node::input_types() const { return props_->input_types; }
int32 Node::num_outputs() const { return props_->output_types.size(); }
DataType Node::output_type(int32_t o) const { return props_->output_types[o]; }
const DataTypeVector& Node::output_types() const {
return props_->output_types;
}
AttrSlice Node::attrs() const { return AttrSlice(def()); }
const protobuf::RepeatedPtrField<std::string>& Node::requested_inputs() const {
return def().input();
}
const std::string& Node::requested_device() const { return def().device(); }
gtl::iterator_range<NeighborIter> Node::out_nodes() const {
return gtl::make_range(NeighborIter(out_edges_.begin(), false),
NeighborIter(out_edges_.end(), false));
}
gtl::iterator_range<NeighborIter> Node::in_nodes() const {
return gtl::make_range(NeighborIter(in_edges_.begin(), true),
NeighborIter(in_edges_.end(), true));
}
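// NodeProperties may be shared across nodes (e.g. after Graph::CopyNode), so
// every mutation path clones the properties first if they are not uniquely
// owned.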
void Node::MaybeCopyOnWrite() {
if (!(props_.use_count() == 1)) {
props_ = std::make_shared<NodeProperties>(*props_);
}
}
AttrValue* Node::AddAttrHelper(const std::string& name) {
MaybeCopyOnWrite();
return &((*props_->node_def.mutable_attr())[name]);
}
void Node::ClearAttr(const std::string& name) {
MaybeCopyOnWrite();
(*props_->node_def.mutable_attr()).erase(name);
}
void Node::set_name(std::string name) {
MaybeCopyOnWrite();
props_->node_def.set_name(std::move(name));
}
void Node::set_requested_device(const std::string& device) {
MaybeCopyOnWrite();
props_->node_def.set_device(device);
}
void Node::set_original_node_names(const std::vector<std::string>& names) {
MaybeCopyOnWrite();
props_->node_def.mutable_experimental_debug_info()
->clear_original_node_names();
if (!names.empty()) {
*props_->node_def.mutable_experimental_debug_info()
->mutable_original_node_names() = {names.begin(), names.end()};
}
}
void Node::set_original_func_names(const std::vector<std::string>& names) {
MaybeCopyOnWrite();
props_->node_def.mutable_experimental_debug_info()
->clear_original_func_names();
if (!names.empty()) {
*props_->node_def.mutable_experimental_debug_info()
->mutable_original_func_names() = {names.begin(), names.end()};
}
}
Status Node::input_edge(int idx, const Edge** e) const {
if (idx < 0 || idx >= num_inputs()) {
return errors::InvalidArgument("Invalid input_edge index: ", idx, ", Node ",
name(), " only has ", num_inputs(),
" inputs.");
}
for (const Edge* edge : in_edges()) {
if (edge->dst_input() == idx) {
*e = edge;
return absl::OkStatus();
}
}
return errors::NotFound("Could not find input edge ", idx, " for ", name());
}
Status Node::input_edges(std::vector<const Edge*>* input_edges) const {
input_edges->clear();
input_edges->resize(num_inputs(), nullptr);
for (const Edge* edge : in_edges()) {
if (edge->IsControlEdge()) continue;
if (edge->dst_input() < 0 || edge->dst_input() >= num_inputs()) {
return errors::Internal("Invalid edge input number ", edge->dst_input());
}
if ((*input_edges)[edge->dst_input()] != nullptr) {
return errors::Internal("Duplicate edge input number: ",
edge->dst_input());
}
(*input_edges)[edge->dst_input()] = edge;
}
for (int i = 0; i < num_inputs(); ++i) {
if ((*input_edges)[i] == nullptr) {
return errors::InvalidArgument("Missing edge input number: ", i);
}
}
return absl::OkStatus();
}
Status Node::input_node(int idx, Node** n) const {
const Edge* e;
TF_RETURN_IF_ERROR(input_edge(idx, &e));
if (e == nullptr) {
*n = nullptr;
} else {
*n = e->src();
}
return absl::OkStatus();
}
Status Node::input_node(int idx, const Node** const_n) const {
Node* n;
TF_RETURN_IF_ERROR(input_node(idx, &n));
*const_n = n;
return absl::OkStatus();
}
Status Node::input_tensor(int idx, OutputTensor* t) const {
const Edge* e;
TF_RETURN_IF_ERROR(input_edge(idx, &e));
DCHECK(e != nullptr);
*t = OutputTensor(e->src(), e->src_output());
return absl::OkStatus();
}
NodeDebugInfo::NodeDebugInfo(const Node& n) : NodeDebugInfo(n.def()) {}
NodeDebugInfo::NodeDebugInfo(const NodeDef& ndef)
: NodeDebugInfo(ndef.name(), ndef.has_experimental_debug_info(),
ndef.experimental_debug_info()) {}
NodeDebugInfo::NodeDebugInfo(
StringPiece node_name, bool has_experimental_debug_info,
const NodeDef_ExperimentalDebugInfo& experimental_debug_info)
: name(node_name) {
if (has_experimental_debug_info) {
const auto& node_names = experimental_debug_info.original_node_names();
original_node_names.assign(node_names.begin(), node_names.end());
const auto& func_names = experimental_debug_info.original_func_names();
original_func_names.assign(func_names.begin(), func_names.end());
}
}
bool InputTensor::operator==(const InputTensor& other) const {
return node == other.node && index == other.index;
}
uint64 InputTensor::Hash::operator()(InputTensor const& s) const {
return Hash64Combine(std::hash<const Node*>()(s.node),
std::hash<int>()(s.index));
}
bool OutputTensor::operator==(const OutputTensor& other) const {
return node == other.node && index == other.index;
}
uint64 OutputTensor::Hash::operator()(OutputTensor const& s) const {
return Hash64Combine(std::hash<const Node*>()(s.node),
std::hash<int>()(s.index));
}
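// Every Graph is created with two synthetic NoOp nodes, _SOURCE (kSourceId)
// and _SINK (kSinkId), joined by a control edge; the empty device name is
// pre-interned at index 0.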
Graph::Graph(const OpRegistryInterface* ops)
: ops_(ops, FunctionDefLibrary()),
versions_(new VersionDef),
      arena_(8 << 10 /* 8kB */) {
versions_->set_producer(TF_GRAPH_DEF_VERSION);
versions_->set_min_consumer(TF_GRAPH_DEF_VERSION_MIN_CONSUMER);
device_names_.push_back("");
DCHECK_EQ(0, InternDeviceName(""));
NodeDef def;
def.set_name("_SOURCE");
def.set_op("NoOp");
Status status;
Node* source = AddNode(def, &status);
TF_CHECK_OK(status);
CHECK_EQ(source->id(), kSourceId);
def.set_name("_SINK");
Node* sink = AddNode(def, &status);
TF_CHECK_OK(status);
CHECK_EQ(sink->id(), kSinkId);
AddControlEdge(source, sink);
}
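// Constructing from a FunctionLibraryDefinition also copies its function
// library into the graph; graphs containing functions require a GraphDef
// consumer version of at least 12.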
Graph::Graph(const FunctionLibraryDefinition& flib_def)
: Graph(flib_def.default_registry()) {
if (flib_def.num_functions() > 0 && versions_->min_consumer() < 12) {
versions_->set_min_consumer(12);
}
Status s = ops_.AddLibrary(flib_def);
CHECK(s.ok()) << s.message();
}
Graph::~Graph() {
for (Node* node : nodes_) {
if (node != nullptr) {
node->~Node();
}
}
for (Node* node : free_nodes_) {
node->~Node();
}
}
std::unique_ptr<Graph> Graph::Clone() {
std::unique_ptr<Graph> new_graph(new Graph(flib_def()));
new_graph->Copy(*this);
return new_graph;
}
void Graph::Clear() {
for (Node* n : nodes()) {
if (!n->IsSource() && !n->IsSink()) RemoveNode(n);
}
}
const VersionDef& Graph::versions() const { return *versions_; }
void Graph::set_versions(const VersionDef& versions) { *versions_ = versions; }
void Graph::Copy(const Graph& src) {
SetConstructionContext(src.GetConstructionContextInternal());
for (Node* n : nodes()) {
CHECK(n->IsSource() || n->IsSink()) << "*dest must be empty";
}
set_versions(src.versions());
gtl::FlatMap<const Node*, Node*> node_map;
node_map.reserve(src.num_nodes());
node_map[src.source_node()] = source_node();
node_map[src.sink_node()] = sink_node();
for (Node* n : src.op_nodes()) {
auto copy = CopyNode(n);
copy->in_edges_.reserve(n->in_edges().size());
copy->out_edges_.reserve(n->out_edges().size());
node_map[n] = copy;
}
edges_.reserve(src.num_edges());
for (const Edge* e : src.edges()) {
Node* src_copy = node_map[e->src()];
Node* dst_copy = node_map[e->dst()];
AddEdge(src_copy, e->src_output(), dst_copy, e->dst_input());
}
}
absl::StatusOr<Node*> Graph::AddNode(NodeDef node_def) {
Status s;
Node* out = AddNode(std::move(node_def), &s);
TF_RETURN_IF_ERROR(s);
return out;
}
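// A minimal usage sketch (assumed caller-side code, not part of this file):
//   Graph g(OpRegistry::Global());
//   NodeDef def;
//   def.set_name("a");
//   def.set_op("NoOp");
//   absl::StatusOr<Node*> n = g.AddNode(std::move(def));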
Node* Graph::AddNode(NodeDef node_def, Status* status) {
const OpRegistrationData* op_reg_data;
status->Update(ops_.LookUp(node_def.op(), &op_reg_data));
if (!status->ok()) return nullptr;
DataTypeVector inputs;
DataTypeVector outputs;
status->Update(
InOutTypesForNode(node_def, op_reg_data->op_def, &inputs, &outputs));
if (!status->ok()) {
*status = AttachDef(*status, node_def);
return nullptr;
}
Node::NodeClass node_class = op_reg_data->is_function_op
? Node::NC_FUNCTION_OP
: Node::GetNodeClassForOp(node_def.op());
if (node_def.has_experimental_type()) {
VLOG(3) << "AddNode: node has type set, skipping type constructor "
<< node_def.name();
} else {
if (op_reg_data->type_ctor != nullptr) {
VLOG(3) << "AddNode: found type constructor for " << node_def.name();
Status s =
full_type::SpecializeType(AttrSlice(node_def), op_reg_data->op_def,
*(node_def.mutable_experimental_type()));
if (!s.ok()) {
*status = errors::InvalidArgument("type error: ", s.ToString());
VLOG(3) << "AddNode: type inference failed for " << node_def.name()
<< ": " << s;
return nullptr;
}
} else {
VLOG(3) << "AddNode: no type constructor for " << node_def.name();
}
}
Node* node = AllocateNode(
std::make_shared<NodeProperties>(&op_reg_data->op_def,
std::move(node_def), inputs, outputs),
nullptr, node_class);
return node;
}
Node* Graph::CopyNode(const Node* node) {
DCHECK(!node->IsSource());
DCHECK(!node->IsSink());
Node* copy = AllocateNode(node->props_, node, node->class_);
copy->set_assigned_device_name(node->assigned_device_name());
const OpDef* op_def;
TF_CHECK_OK(ops_.LookUpOpDef(node->type_string(), &op_def));
if (op_def != node->props_->op_def) {
copy->MaybeCopyOnWrite();
copy->props_->op_def = op_def;
}
copy->SetStackTrace(node->GetStackTrace());
return copy;
}
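// Detaches all incident edges (recycling each onto the free list) and returns
// the node itself to free_nodes_ for reuse by a later AllocateNode call.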
void Graph::RemoveNode(Node* node) {
TF_DCHECK_OK(IsValidNode(node)) << node->DebugString();
DCHECK(!node->IsSource());
DCHECK(!node->IsSink());
for (const Edge* e : node->in_edges_) {
CHECK_EQ(e->src_->out_edges_.erase(e), size_t{1});
edges_[e->id_] = nullptr;
RecycleEdge(e);
--num_edges_;
}
node->in_edges_.clear();
for (const Edge* e : node->out_edges_) {
CHECK_EQ(e->dst_->in_edges_.erase(e), size_t{1});
edges_[e->id_] = nullptr;
RecycleEdge(e);
--num_edges_;
}
node->out_edges_.clear();
ReleaseNode(node);
}
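// Edges are arena-allocated and recycled via free_edges_. Edges leaving the
// source node or entering the sink node must be control edges using
// kControlSlot on both endpoints, which the DCHECKs below verify in debug
// builds.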
const Edge* Graph::AddEdge(Node* source, int x, Node* dest, int y) {
TF_DCHECK_OK(IsValidNode(source)) << source->DebugString();
TF_DCHECK_OK(IsValidNode(dest)) << dest->DebugString();
if (source == source_node() || dest == sink_node() || x == kControlSlot ||
y == kControlSlot) {
DCHECK_EQ(x, kControlSlot) << source->DebugString();
DCHECK_EQ(y, kControlSlot) << dest->DebugString();
}
Edge* e = nullptr;
if (free_edges_.empty()) {
e = new (arena_.Alloc(sizeof(Edge))) Edge;
} else {
e = free_edges_.back();
free_edges_.pop_back();
}
e->id_ = edges_.size();
e->src_ = source;
e->dst_ = dest;
e->src_output_ = x;
e->dst_input_ = y;
CHECK(source->out_edges_.insert(e).second);
CHECK(dest->in_edges_.insert(e).second);
edges_.push_back(e);
++num_edges_;
return e;
}
void Graph::RemoveEdge(const Edge* e) {
TF_DCHECK_OK(IsValidNode(e->src_)) << e->src_->DebugString();
TF_DCHECK_OK(IsValidNode(e->dst_)) << e->dst_->DebugString();
CHECK_EQ(e->src_->out_edges_.erase(e), size_t{1});
CHECK_EQ(e->dst_->in_edges_.erase(e), size_t{1});
CHECK_EQ(e, edges_[e->id_]);
CHECK_GT(num_edges_, 0);
edges_[e->id_] = nullptr;
RecycleEdge(e);
--num_edges_;
}
void Graph::RecycleEdge(const Edge* e) {
free_edges_.push_back(const_cast<Edge*>(e));
}
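// Adds a control edge and, unless `allow_duplicates` is set, skips exact
// duplicates and mirrors the edge into the destination's NodeDef as a
// "^source" input (the synthetic source and sink nodes are exempt).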
const Edge* Graph::AddControlEdge(Node* source, Node* dest,
bool allow_duplicates) {
if (!allow_duplicates) {
for (const Edge* edge : dest->in_edges()) {
if (edge->IsControlEdge() && edge->src() == source) {
return nullptr;
}
}
}
if (!source->IsSource() && !dest->IsSink() && !allow_duplicates) {
const std::string new_input = strings::StrCat("^", source->name());
bool input_exists = false;
for (const std::string& input : dest->props_->node_def.input()) {
if (input == new_input) {
input_exists = true;
break;
}
}
if (!input_exists) {
dest->MaybeCopyOnWrite();
dest->props_->node_def.add_input(new_input);
}
}
return AddEdge(source, kControlSlot, dest, kControlSlot);
}
void Graph::RemoveControlEdge(const Edge* e) {
if (!e->src_->IsSource() && !e->dst_->IsSink()) {
e->dst_->MaybeCopyOnWrite();
std::string e_src_name = strings::StrCat("^", e->src_->name());
auto* inputs = e->dst_->props_->node_def.mutable_input();
for (auto it = inputs->begin(); it != inputs->end(); ++it) {
if (*it == e_src_name) {
inputs->erase(it);
break;
}
}
}
RemoveEdge(e);
}
namespace {
const Edge* FindEdge(const Node* dst, int index) {
for (const Edge* e : dst->in_edges()) {
if (e->dst_input() == index) return e;
}
return nullptr;
}
}
Status Graph::UpdateEdge(Node* new_src, int new_src_index, Node* dst,
int dst_index) {
TF_RETURN_IF_ERROR(IsValidOutputTensor(new_src, new_src_index));
TF_RETURN_IF_ERROR(IsValidInputTensor(dst, dst_index));
const Edge* e = FindEdge(dst, dst_index);
if (e == nullptr) {
return errors::InvalidArgument("Couldn't find edge to ",
FormatNodeForError(*dst));
}
RemoveEdge(e);
AddEdge(new_src, new_src_index, dst, dst_index);
dst->MaybeCopyOnWrite();
(*dst->props_->node_def.mutable_input())[dst_index] =
strings::StrCat(new_src->name(), ":", new_src_index);
return absl::OkStatus();
}
void Graph::AddInput(NodeDef* dst, StringPiece src_name, int src_slot) {
if (src_slot == Graph::kControlSlot) {
dst->add_input(strings::StrCat("^", src_name));
} else if (src_slot == 0) {
dst->add_input(src_name.data(), src_name.size());
} else {
dst->add_input(strings::StrCat(src_name, ":", src_slot));
}
}
Status Graph::AddWhileInputHack(Node* new_src, int new_src_index, Node* dst) {
if (!dst->IsWhileNode()) {
return errors::Internal(
"dst argument to AddWhileEdgeHack should be a While op, got: ",
dst->DebugString());
}
TF_RETURN_IF_ERROR(IsValidOutputTensor(new_src, new_src_index));
int dst_index = 0;
for (const Edge* edge : dst->in_edges()) {
if (edge->IsControlEdge()) continue;
++dst_index;
}
TF_RETURN_IF_ERROR(IsValidInputTensor(dst, dst_index));
AddEdge(new_src, new_src_index, dst, dst_index);
dst->MaybeCopyOnWrite();
dst->props_->node_def.add_input(
strings::StrCat(new_src->name(), ":", new_src_index));
return absl::OkStatus();
}
Status Graph::AddFunctionLibrary(
const FunctionDefLibrary& fdef_lib,
const FunctionDefLibraryStackTraces& library_traces) {
return AddFunctionLibrary(FunctionDefLibrary(fdef_lib), library_traces);
}
Status Graph::AddFunctionLibrary(
FunctionDefLibrary&& fdef_lib,
const FunctionDefLibraryStackTraces& library_traces) {
if (fdef_lib.function_size() > 0 && versions_->min_consumer() < 12) {
versions_->set_min_consumer(12);
}
return ops_.AddLibrary(std::move(fdef_lib), library_traces);
}
Status Graph::AddFunctionLibrary(const FunctionDefLibrary& fdef_lib) {
return AddFunctionLibrary(fdef_lib, {});
}
Status Graph::AddFunctionLibrary(FunctionDefLibrary&& fdef_lib) {
return AddFunctionLibrary(std::move(fdef_lib), {});
}
Status Graph::AddFunctionDef(const FunctionDef& fdef,
const StackTracesMap& stack_traces) {
if (versions_->min_consumer() < 12) {
versions_->set_min_consumer(12);
}
return ops_.AddFunctionDef(fdef, stack_traces);
}
Status Graph::AddGradientDef(const GradientDef& gdef) {
if (versions_->min_consumer() < 12) {
versions_->set_min_consumer(12);
}
return ops_.AddGradientDef(gdef);
}
void Graph::ToGraphDef(GraphDef* graph_def, bool include_flib_def,
bool include_debug_info) const {
  ToGraphDefSubRange(graph_def, /*from_node_id=*/0, include_flib_def,
                     include_debug_info);
}
GraphDef Graph::ToGraphDefDebug() const {
GraphDef ret;
ToGraphDef(&ret);
return ret;
}
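// Serializes every op node with id >= from_node_id. Data inputs are emitted
// in dst_input order; control inputs are appended afterwards, sorted by
// source node name so the serialized form is deterministic.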
void Graph::ToGraphDefSubRange(GraphDef* graph_def, int from_node_id,
bool include_flib_def,
bool include_debug_info) const {
graph_def->Clear();
*graph_def->mutable_versions() = versions();
if (include_flib_def) {
*graph_def->mutable_library() = ops_.ToProto();
}
if (include_debug_info) {
*graph_def->mutable_debug_info() = BuildDebugInfo();
}
graph_def->mutable_node()->Reserve(std::max(1, num_nodes() - from_node_id));
  std::vector<const Edge*> inputs;  // Reused across iterations.
for (auto id = from_node_id; id < num_node_ids(); ++id) {
const Node* node = FindNodeId(id);
if (node == nullptr || !node->IsOp()) continue;
NodeDef* node_def = graph_def->add_node();
*node_def = node->def();
if (!node->assigned_device_name().empty()) {
node_def->set_device(node->assigned_device_name());
}
inputs.clear();
inputs.resize(node->num_inputs(), nullptr);
for (const Edge* edge : node->in_edges()) {
if (edge->IsControlEdge()) {
inputs.push_back(edge);
} else {
DCHECK(edge->dst_input() < inputs.size())
<< "Edge " << edge->DebugString()
<< " is overflowing the expected number of inputs ("
<< node->num_inputs() << ") for node " << node->DebugString();
CHECK(inputs[edge->dst_input()] == nullptr)
<< "Edge " << edge->src()->name() << "->" << edge->dst()->name()
<< " conflicts with pre-existing input edge "
<< inputs[edge->dst_input()]->src()->name() << "->"
<< inputs[edge->dst_input()]->dst()->name();
inputs[edge->dst_input()] = edge;
}
}
std::sort(inputs.begin() + node->num_inputs(), inputs.end(),
[](const Edge* a, const Edge* b) -> bool {
return a->src()->name() < b->src()->name();
});
node_def->clear_input();
node_def->mutable_input()->Reserve(inputs.size());
for (size_t i = 0; i < inputs.size(); ++i) {
const Edge* edge = inputs[i];
if (edge == nullptr) {
if (i < node->requested_inputs().size()) {
node_def->add_input(node->requested_inputs()[i]);
} else {
node_def->add_input("");
}
} else {
const Node* src = edge->src();
if (!src->IsOp()) continue;
AddInput(node_def, src->name(), edge->src_output());
}
}
}
}
std::string Graph::NewName(StringPiece prefix) {
return strings::StrCat(prefix, "/_", name_counter_++);
}
Status Graph::IsValidNode(const Node* node) const {
if (node == nullptr) {
return errors::InvalidArgument("Node is null");
}
const int id = node->id();
if (id < 0) {
return errors::InvalidArgument("node id ", id, " is less than zero");
}
if (static_cast<size_t>(id) >= nodes_.size()) {
return errors::InvalidArgument(
"node id ", id, " is >= than number of nodes in graph ", nodes_.size());
}
if (nodes_[id] != node) {
return errors::InvalidArgument("Node with id ", id,
" is different from the passed in node. "
"Does it belong to a different graph?");
}
return absl::OkStatus();
}
Status Graph::IsValidOutputTensor(const Node* node, int idx) const {
TF_RETURN_IF_ERROR(IsValidNode(node));
if (idx >= node->num_outputs() || idx < 0) {
return errors::OutOfRange("Node '", node->name(), "' (type: '",
node->op_def().name(),
"', num of outputs: ", node->num_outputs(),
") does not have ", "output ", idx);
}
return absl::OkStatus();
}
Status Graph::IsValidInputTensor(const Node* node, int idx) const {
TF_RETURN_IF_ERROR(IsValidNode(node));
if (idx >= node->num_inputs() || idx < 0) {
return errors::OutOfRange("Node '", node->name(), "' (type: '",
node->op_def().name(),
"', num of inputs: ", node->num_inputs(),
") does not have ", "input ", idx);
}
return absl::OkStatus();
}
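// Nodes are placement-new'd into the arena, or reused from free_nodes_ when a
// previously removed node is available. A node's id is its slot in nodes_;
// its cost_id is inherited from `cost_node` when one is supplied.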
Node* Graph::AllocateNode(std::shared_ptr<NodeProperties> props,
const Node* cost_node, Node::NodeClass node_class) {
Node* node = nullptr;
if (free_nodes_.empty()) {
node = new (arena_.Alloc(sizeof(Node))) Node;
} else {
node = free_nodes_.back();
free_nodes_.pop_back();
}
node->graph_ = this;
const int id = nodes_.size();
int cost_id = cost_node ? cost_node->cost_id() : id;
node->Initialize(id, cost_id, std::move(props), node_class);
nodes_.push_back(node);
++num_nodes_;
return node;
}
void Graph::ReleaseNode(Node* node) {
TF_DCHECK_OK(IsValidNode(node)) << node->DebugString();
nodes_[node->id()] = nullptr;
free_nodes_.push_back(node);
--num_nodes_;
node->Clear();
}
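// Interns a device name to a small integer index so nodes can store their
// assigned device as an index rather than a string; index 0 is reserved for
// the empty name.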
int Graph::InternDeviceName(const std::string& device_name) {
if (device_name.empty()) {
return 0;
}
int& index_cell = device_names_map_[device_name];
if (index_cell > 0) {
return index_cell;
}
const int index = device_names_map_.size();
index_cell = index;
device_names_.push_back(device_name);
return index;
}
Status Graph::AddWhileContext(StringPiece frame_name,
std::vector<Node*> enter_nodes,
std::vector<Node*> exit_nodes,
OutputTensor cond_output,
std::vector<OutputTensor> body_inputs,
std::vector<OutputTensor> body_outputs,
WhileContext** result) {
auto pair = while_ctxs_.insert(std::pair<std::string, WhileContext>(
std::string(frame_name),
WhileContext(frame_name, std::move(enter_nodes), std::move(exit_nodes),
cond_output, std::move(body_inputs),
std::move(body_outputs))));
if (!pair.second) {
*result = nullptr;
return errors::InvalidArgument("WhileContext with frame name '", frame_name,
"' already exists");
}
*result = &pair.first->second;
return absl::OkStatus();
}
std::unordered_map<std::string, Node*> Graph::BuildNodeNameIndex() const {
std::unordered_map<std::string, Node*> result;
for (Node* n : nodes()) {
result[n->name()] = n;
}
return result;
}
void Graph::SetNodeType(StringPiece name, const FullTypeDef& ft) {
for (Node* n : op_nodes()) {
if (n->name() == name) {
NodeDef& node_def = n->props_->node_def;
n->MaybeCopyOnWrite();
*(node_def.mutable_experimental_type()) = ft;
break;
}
}
}
void Graph::NodeType(StringPiece name, const FullTypeDef** result) {
*result = nullptr;
for (Node* n : op_nodes()) {
if (n->name() == name) {
NodeDef& node_def = n->props_->node_def;
*result = &node_def.experimental_type();
break;
}
}
}
GraphDebugInfo Graph::BuildDebugInfo() const {
GraphDebugInfoBuilder builder;
for (const std::string& function_name : flib_def().ListFunctionNames()) {
if (core::RefCountPtr<FunctionRecord> function_record =
flib_def().FindRecord(function_name)) {
builder.AccumulateStackTracesMap(function_record->stack_traces(),
absl::StrCat("@", function_name));
}
}
for (const Node* node : nodes()) {
if (node == nullptr || !node->IsOp()) {
continue;
}
const std::shared_ptr<AbstractStackTrace>& stack_trace =
node->GetStackTrace();
if (stack_trace != nullptr) {
builder.AccumulateStackTrace(stack_trace, node->name());
}
}
return builder.Build();
}
std::string Edge::DebugString() const {
auto src_name = src_ ? src_->name().c_str() : "<NULL>";
auto dst_name = dst_ ? dst_->name().c_str() : "<NULL>";
return strings::Printf("[id=%d %s:%d -> %s:%d]", id_, src_name, src_output_,
dst_name, dst_input_);
}
} | #include "tensorflow/core/graph/graph.h"
#include <memory>
#include <set>
#include <unordered_map>
#include <vector>
#include <gmock/gmock.h>
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/benchmark_testlib.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
using ::testing::UnorderedElementsAre;
REGISTER_OP("OneInput").Input("x: float");
REGISTER_OP("OneOutput").Output("y: float");
REGISTER_OP("OneInputTwoOutputs")
.Input("x: float")
.Output("y: float")
.Output("z: float");
REGISTER_OP("TwoInputsOneOutput")
.Input("x: float")
.Input("y: float")
.Output("z: float");
class GraphTest : public ::testing::Test {
protected:
GraphTest() : graph_(OpRegistry::Global()) {}
~GraphTest() override {}
static void VerifyNodes(Node* node, const std::vector<Node*>& expected_in,
const std::vector<Node*>& expected_out) {
std::vector<Node*> in;
for (const Edge* e : node->in_edges()) {
in.push_back(e->src());
}
EXPECT_EQ(Stringify(expected_in), Stringify(in));
std::vector<Node*> out;
for (const Edge* e : node->out_edges()) {
out.push_back(e->dst());
}
EXPECT_EQ(Stringify(expected_out), Stringify(out));
}
std::unique_ptr<Edge> BuildEdge(int id = 0, Node* src = nullptr,
Node* dst = nullptr, int x = 0, int y = 0) {
Edge* e = new Edge;
e->id_ = id;
e->src_ = src;
e->dst_ = dst;
e->src_output_ = x;
e->dst_input_ = y;
return absl::WrapUnique(e);
}
void VerifyGraphStats() {
int nodes = 0;
for (const Node* n : graph_.nodes()) {
VLOG(1) << n->id();
++nodes;
}
EXPECT_EQ(nodes, graph_.num_nodes());
int edges = 0;
for (const Edge* e : graph_.edges()) {
VLOG(1) << e->id();
++edges;
}
EXPECT_EQ(edges, graph_.num_edges());
}
Node* AddNodeWithName(const string& name) {
Node* node;
TF_CHECK_OK(NodeBuilder(name, "NoOp").Finalize(&graph_, &node));
return node;
}
Node* FromNodeDef(const string& name, const string& node_type,
int num_inputs) {
auto builder = NodeDefBuilder(name, node_type);
for (int i = 0; i < num_inputs; ++i) {
builder = builder.Input(strings::StrCat("node_", i), i, DT_FLOAT);
}
NodeDef node_def;
TF_CHECK_OK(builder.Finalize(&node_def));
Status s;
Node* node = graph_.AddNode(node_def, &s);
TF_CHECK_OK(s);
return node;
}
void FromGraphDef(const string& gdef_ascii) {
GraphDef gdef;
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii, &gdef));
GraphConstructorOptions opts;
TF_CHECK_OK(ConvertGraphDefToGraph(opts, gdef, &graph_));
}
Node* FindNode(const string& name) {
for (Node* node : graph_.nodes()) {
if (node->name() == name) return node;
}
LOG(FATAL) << name;
}
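  // Returns true if a control edge from `src` to `dst` is recorded either in
  // the graph's edge set or as a "^src" entry in dst's NodeDef inputs.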
bool ControlEdgeExistsInGraphOrNodeDef(const Node* src, const Node* dst) {
for (const Edge* e : dst->in_edges()) {
if (e->IsControlEdge() && e->src() == src &&
e->src_output() == Graph::kControlSlot &&
e->dst_input() == Graph::kControlSlot) {
return true;
}
}
std::string control_edge_name = strings::StrCat("^", src->name());
for (int i = 0; i < dst->def().input_size(); ++i) {
if (dst->def().input(i) == control_edge_name) {
return true;
}
}
return false;
}
Graph graph_;
private:
static std::vector<string> Stringify(const std::vector<Node*>& nodes) {
std::vector<string> result;
result.reserve(nodes.size());
for (Node* n : nodes) {
result.push_back(n->DebugString());
}
std::sort(result.begin(), result.end());
return result;
}
};
namespace {
TEST_F(GraphTest, Constructor) {
Node* source = graph_.source_node();
EXPECT_NE(source, nullptr);
Node* sink = graph_.sink_node();
EXPECT_NE(sink, nullptr);
VerifyNodes(source, {}, {sink});
VerifyNodes(sink, {source}, {});
EXPECT_EQ(2, graph_.num_node_ids());
VerifyGraphStats();
}
TEST_F(GraphTest, RemoveThenAdd) {
AddNodeWithName("A");
Node* b = AddNodeWithName("B");
const int b_id = b->id();
AddNodeWithName("C");
EXPECT_EQ(5, graph_.num_node_ids());
graph_.RemoveNode(b);
EXPECT_EQ(5, graph_.num_node_ids());
Node* d = AddNodeWithName("D");
EXPECT_NE(b_id, d->id());
EXPECT_EQ(6, graph_.num_node_ids());
VerifyGraphStats();
}
TEST_F(GraphTest, InNodesAndOutNodes) {
Node* a = FromNodeDef("A", "OneOutput", 0);
Node* b = AddNodeWithName("B");
Node* c = FromNodeDef("C", "OneInput", 1);
graph_.RemoveNode(b);
Node* d = AddNodeWithName("D");
const Edge* source_to_a = graph_.AddControlEdge(graph_.source_node(), a);
graph_.AddControlEdge(a, graph_.sink_node());
graph_.AddEdge(a, 0, c, 0);
graph_.AddControlEdge(c, graph_.sink_node());
EXPECT_EQ("A", a->name());
VerifyNodes(a, {graph_.source_node()}, {c, graph_.sink_node()});
EXPECT_EQ("C", c->name());
VerifyNodes(c, {a}, {graph_.sink_node()});
EXPECT_EQ("D", d->name());
VerifyNodes(d, {}, {});
VerifyNodes(graph_.source_node(), {}, {a, graph_.sink_node()});
VerifyNodes(graph_.sink_node(), {a, c, graph_.source_node()}, {});
graph_.RemoveEdge(source_to_a);
VerifyNodes(a, {}, {c, graph_.sink_node()});
VerifyNodes(graph_.source_node(), {}, {graph_.sink_node()});
graph_.RemoveNode(c);
VerifyNodes(a, {}, {graph_.sink_node()});
VerifyNodes(graph_.sink_node(), {a, graph_.source_node()}, {});
EXPECT_EQ(6, graph_.num_node_ids());
EXPECT_EQ(5, graph_.num_edge_ids());
VerifyGraphStats();
}
TEST_F(GraphTest, NodeByIndex) {
Node* a = FromNodeDef("A", "OneOutput", 0);
Node* c = FromNodeDef("C", "OneInput", 1);
graph_.AddEdge(a, 0, c, 0);
const Node* a_copy;
TF_ASSERT_OK(c->input_node(0, &a_copy));
EXPECT_EQ(a, a_copy);
const Edge* e;
TF_ASSERT_OK(c->input_edge(0, &e));
EXPECT_EQ(0, e->dst_input());
EXPECT_EQ(a, e->src());
EXPECT_EQ(c, e->dst());
EXPECT_EQ(0, e->src_output());
Node* t = FromNodeDef("T", "TwoInputsOneOutput", 2);
graph_.AddEdge(a, 0, t, 0);
graph_.AddEdge(t, 0, t, 1);
const Node* t_0;
const Node* t_1;
TF_ASSERT_OK(t->input_node(0, &t_0));
EXPECT_EQ(a, t_0);
TF_ASSERT_OK(t->input_node(1, &t_1));
EXPECT_EQ(t, t_1);
TF_ASSERT_OK(t->input_edge(1, &e));
EXPECT_EQ(1, e->dst_input());
EXPECT_EQ(t, e->src());
std::vector<const Edge*> t_input_edges;
TF_ASSERT_OK(t->input_edges(&t_input_edges));
ASSERT_EQ(2, t_input_edges.size());
EXPECT_EQ(a, t_input_edges[0]->src());
EXPECT_EQ(e, t_input_edges[1]);
EXPECT_FALSE(c->input_node(1, &a_copy).ok());
EXPECT_FALSE(c->input_node(-1, &a_copy).ok());
graph_.RemoveNode(a);
Status s = c->input_node(0, &a_copy);
EXPECT_FALSE(s.ok());
Node* a_new = FromNodeDef("A_new", "OneOutput", 0);
Node* b_new = FromNodeDef("B_new", "OneOutput", 0);
graph_.AddEdge(a_new, 0, c, 0);
const Edge* a_new_c_edge;
TF_ASSERT_OK(c->input_edge(0, &a_new_c_edge));
graph_.AddEdge(b_new, 0, c, 0);
const Edge* b_new_c_edge;
TF_ASSERT_OK(c->input_edge(0, &b_new_c_edge));
graph_.RemoveEdge(a_new_c_edge);
TF_ASSERT_OK(c->input_edge(0, &b_new_c_edge));
std::vector<const Edge*> c_input_edges;
TF_ASSERT_OK(c->input_edges(&c_input_edges));
ASSERT_EQ(1, c_input_edges.size());
EXPECT_EQ(b_new_c_edge, c_input_edges[0]);
}
TEST_F(GraphTest, NodeIteration) {
Node* a = FromNodeDef("A", "OneOutput", 0);
Node* b = AddNodeWithName("B");
Node* c = FromNodeDef("C", "OneInput", 1);
graph_.RemoveNode(b);
Node* d = AddNodeWithName("D");
const Edge* source_to_a = graph_.AddControlEdge(graph_.source_node(), a);
graph_.AddControlEdge(a, graph_.sink_node());
graph_.AddEdge(a, 0, c, 0);
graph_.AddControlEdge(c, graph_.sink_node());
graph_.RemoveEdge(source_to_a);
graph_.RemoveNode(c);
std::set<string> expected;
expected.insert(graph_.source_node()->DebugString());
expected.insert(a->DebugString());
expected.insert(d->DebugString());
expected.insert(graph_.sink_node()->DebugString());
std::set<string> actual;
for (int id = 0; id < graph_.num_node_ids(); ++id) {
Node* node = graph_.FindNodeId(id);
if (node != nullptr) {
actual.insert(node->DebugString());
}
}
EXPECT_EQ(expected, actual);
actual.clear();
for (Node* node : graph_.nodes()) {
actual.insert(node->DebugString());
}
EXPECT_EQ(expected, actual);
VerifyGraphStats();
}
static void CheckType(Node* node, bool b) {
EXPECT_TRUE(b) << node->DebugString();
int count = 0;
if (node->IsSource()) count++;
if (node->IsSink()) count++;
if (node->IsOp()) count++;
EXPECT_EQ(1, count) << node->DebugString();
}
TEST_F(GraphTest, Type) {
Node* op = AddNodeWithName("A");
CheckType(graph_.source_node(), graph_.source_node()->IsSource());
CheckType(graph_.sink_node(), graph_.sink_node()->IsSink());
CheckType(op, op->IsOp());
VerifyGraphStats();
}
TEST_F(GraphTest, AddAttr) {
Node* n1 = AddNodeWithName("A");
n1->AddAttr("_a", "new_attr");
string attr;
EXPECT_EQ(absl::OkStatus(), GetNodeAttr(n1->attrs(), "_a", &attr));
EXPECT_EQ("new_attr", attr);
Node* n2 = graph_.CopyNode(n1);
n1->AddAttr("_b", "new_attr_2");
EXPECT_EQ(absl::OkStatus(), GetNodeAttr(n1->attrs(), "_a", &attr));
EXPECT_EQ("new_attr", attr);
EXPECT_EQ(absl::OkStatus(), GetNodeAttr(n1->attrs(), "_b", &attr));
EXPECT_EQ("new_attr_2", attr);
EXPECT_EQ(absl::OkStatus(), GetNodeAttr(n2->attrs(), "_a", &attr));
EXPECT_EQ("new_attr", attr);
EXPECT_NE(absl::OkStatus(), GetNodeAttr(n2->attrs(), "_b", &attr));
}
static string EdgeIter(const Graph& g) {
std::vector<std::pair<int, int> > edges;
for (const Edge* e : g.edges()) {
edges.push_back(std::make_pair(e->src()->id(), e->dst()->id()));
}
std::sort(edges.begin(), edges.end());
string result;
for (auto& p : edges) {
strings::StrAppend(&result, p.first, "->", p.second, ";");
}
return result;
}
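// Renders the graph's edge set as a canonical sorted "src->dst;" string of
// node ids, giving the tests a compact way to assert all edges at once.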
TEST_F(GraphTest, EdgeIteration) {
EXPECT_EQ("0->1;", EdgeIter(graph_));
Node* a = FromNodeDef("A", "OneInputTwoOutputs", 1);
Node* b = FromNodeDef("B", "OneInput", 1);
EXPECT_EQ("0->1;", EdgeIter(graph_));
graph_.AddEdge(a, 0, b, 0);
EXPECT_EQ("0->1;2->3;", EdgeIter(graph_));
graph_.AddControlEdge(graph_.source_node(), a);
graph_.AddControlEdge(b, graph_.sink_node());
EXPECT_EQ("0->1;0->2;2->3;3->1;", EdgeIter(graph_));
graph_.AddEdge(a, 1, a, 0);
EXPECT_EQ("0->1;0->2;2->2;2->3;3->1;", EdgeIter(graph_));
VerifyGraphStats();
}
TEST_F(GraphTest, NewName) {
string a1 = graph_.NewName("A");
string a2 = graph_.NewName("A");
string b1 = graph_.NewName("B");
EXPECT_NE(a1, a2);
EXPECT_NE(a1, b1);
EXPECT_NE(a2, b1);
EXPECT_TRUE(absl::StartsWith(a1, "A")) << a1;
}
TEST_F(GraphTest, IsValidNode) {
Node* g1_node1;
TF_CHECK_OK(NodeBuilder("g1_node1", "NoOp").Finalize(&graph_, &g1_node1));
Graph graph2(OpRegistry::Global());
Node* g2_node1;
Node* g2_node2;
TF_CHECK_OK(NodeBuilder("g2_node1", "NoOp").Finalize(&graph2, &g2_node1));
TF_CHECK_OK(NodeBuilder("g2_node2", "NoOp").Finalize(&graph2, &g2_node2));
Status s = graph_.IsValidNode(nullptr);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_EQ(string("Node is null"), s.message());
s = graph_.IsValidNode(g2_node2);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_EQ(string("node id 3 is >= than number of nodes in graph 3"),
s.message());
s = graph_.IsValidNode(g2_node1);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_EQ(string("Node with id 2 is different from the passed in node. "
"Does it belong to a different graph?"),
s.message());
}
TEST_F(GraphTest, AddControlEdge) {
FromGraphDef(
"node { name: 'A' op: 'OneOutput' }"
"node { name: 'B' op: 'OneInputTwoOutputs' input: [ 'A:0' ] }"
"node { name: 'C' op: 'NoOp' } ");
Node* a = FindNode("A");
Node* b = FindNode("B");
Node* c = FindNode("C");
const Edge* edge = graph_.AddControlEdge(c, a);
ASSERT_TRUE(edge != nullptr);
EXPECT_EQ(edge->src(), c);
EXPECT_EQ(edge->src_output(), Graph::kControlSlot);
EXPECT_EQ(edge->dst(), a);
EXPECT_EQ(edge->dst_input(), Graph::kControlSlot);
ASSERT_EQ(a->def().input_size(), 1);
EXPECT_EQ(a->def().input(0), "^C");
edge = graph_.AddControlEdge(a, b);
EXPECT_TRUE(edge != nullptr);
ASSERT_EQ(b->def().input_size(), 2);
EXPECT_EQ(b->def().input(0), "A:0");
EXPECT_EQ(b->def().input(1), "^A");
edge = graph_.AddControlEdge(a, b);
EXPECT_TRUE(edge == nullptr);
EXPECT_EQ(b->def().input_size(), 2);
edge = graph_.AddControlEdge(a, b, true);
EXPECT_TRUE(edge != nullptr);
ASSERT_EQ(b->def().input_size(), 2);
EXPECT_EQ(b->def().input(0), "A:0");
EXPECT_EQ(b->def().input(1), "^A");
edge = graph_.AddControlEdge(graph_.source_node(), b);
EXPECT_TRUE(edge != nullptr);
EXPECT_EQ(b->def().input_size(), 2);
edge = graph_.AddControlEdge(graph_.source_node(), b);
EXPECT_TRUE(edge == nullptr);
EXPECT_EQ(b->def().input_size(), 2);
}
TEST_F(GraphTest, RemoveControlEdge) {
FromGraphDef(
"node { name: 'A' op: 'OneOutput' }"
"node { name: 'B' op: 'OneInputTwoOutputs' input: [ 'A:0' ] }"
"node { name: 'C' op: 'NoOp' } ");
Node* a = FindNode("A");
Node* b = FindNode("B");
Node* c = FindNode("C");
const Edge* edge_1 = graph_.AddControlEdge(c, a);
const Edge* edge_2 = graph_.AddControlEdge(a, b);
ASSERT_TRUE(edge_1 != nullptr);
ASSERT_TRUE(edge_2 != nullptr);
ASSERT_TRUE(ControlEdgeExistsInGraphOrNodeDef(c, a));
ASSERT_TRUE(ControlEdgeExistsInGraphOrNodeDef(a, b));
graph_.RemoveControlEdge(edge_1);
ASSERT_TRUE(!ControlEdgeExistsInGraphOrNodeDef(c, a));
ASSERT_TRUE(ControlEdgeExistsInGraphOrNodeDef(a, b));
graph_.RemoveControlEdge(edge_2);
ASSERT_TRUE(!ControlEdgeExistsInGraphOrNodeDef(c, a));
ASSERT_TRUE(!ControlEdgeExistsInGraphOrNodeDef(a, b));
const Edge* edge_3 = graph_.AddControlEdge(c, a);
const Edge* edge_4 = graph_.AddControlEdge(c, a);
ASSERT_TRUE(edge_3 != nullptr);
ASSERT_TRUE(edge_4 == nullptr);
graph_.RemoveControlEdge(edge_3);
ASSERT_TRUE(!ControlEdgeExistsInGraphOrNodeDef(c, a));
}
TEST_F(GraphTest, UpdateEdge) {
Node* a = FromNodeDef("A", "OneOutput", 0);
Node* b = FromNodeDef("B", "OneInputTwoOutputs", 1);
Node* c = FromNodeDef("C", "OneInputTwoOutputs", 1);
Node* d = FromNodeDef("D", "OneInput", 1);
graph_.AddControlEdge(graph_.source_node(), a);
graph_.AddControlEdge(a, graph_.sink_node());
graph_.AddEdge(a, 0, c, 0);
graph_.AddControlEdge(c, graph_.sink_node());
graph_.AddEdge(c, 0, b, 0);
graph_.AddEdge(c, 1, d, 0);
EXPECT_EQ("0->1;0->2;2->1;2->4;4->1;4->3;4->5;", EdgeIter(graph_));
TF_EXPECT_OK(graph_.UpdateEdge(a, 0, b, 0));
EXPECT_EQ("0->1;0->2;2->1;2->3;2->4;4->1;4->5;", EdgeIter(graph_));
TF_EXPECT_OK(graph_.UpdateEdge(a, 0, d, 0));
EXPECT_EQ("0->1;0->2;2->1;2->3;2->4;2->5;4->1;", EdgeIter(graph_));
Status s = graph_.UpdateEdge(a, 1, d, 0);
EXPECT_FALSE(s.ok());
EXPECT_EQ(
s.message(),
"Node 'A' (type: 'OneOutput', num of outputs: 1) does not have output 1");
s = graph_.UpdateEdge(c, 0, a, 0);
EXPECT_FALSE(s.ok());
EXPECT_EQ(
s.message(),
"Node 'A' (type: 'OneOutput', num of inputs: 0) does not have input 0");
}
TEST_F(GraphTest, InputEdges) {
Node* a = FromNodeDef("A", "OneOutput", 0);
Node* b = FromNodeDef("B", "TwoInputsOneOutput", 2);
graph_.AddEdge(a, 0, b, 0);
std::vector<const Edge*> edges;
EXPECT_EQ(error::INVALID_ARGUMENT, b->input_edges(&edges).code());
graph_.AddEdge(a, 0, b, 1);
TF_EXPECT_OK(b->input_edges(&edges));
}
TEST_F(GraphTest, EdgeDebugString) {
Node* a = FromNodeDef("A", "OneOutput", 0);
Node* b = FromNodeDef("B", "OneInput", 1);
auto e = graph_.AddEdge(a, 0, b, 0);
auto s = e->DebugString();
EXPECT_EQ(s, "[id=1 A:0 -> B:0]");
auto e1 = BuildEdge();
auto s1 = e1->DebugString();
EXPECT_EQ(s1, "[id=0 <NULL>:0 -> <NULL>:0]");
auto e2 = BuildEdge(2, nullptr, b, 1, 1);
auto s2 = e2->DebugString();
EXPECT_EQ(s2, "[id=2 <NULL>:1 -> B:1]");
auto e3 = BuildEdge(3, a, nullptr, 2, 1);
auto s3 = e3->DebugString();
EXPECT_EQ(s3, "[id=3 A:2 -> <NULL>:1]");
}
TEST_F(GraphTest, AddFunctionLibrary) {
FunctionDefLibrary proto;
*proto.add_function() = test::function::XTimesTwo();
*proto.add_function() = test::function::XTimesFour();
TF_EXPECT_OK(graph_.AddFunctionLibrary(proto));
EXPECT_TRUE(graph_.flib_def().Find("XTimesTwo") != nullptr);
EXPECT_TRUE(graph_.flib_def().Find("XTimesFour") != nullptr);
TF_EXPECT_OK(graph_.AddFunctionLibrary(proto));
EXPECT_TRUE(graph_.flib_def().Find("XTimesTwo") != nullptr);
EXPECT_TRUE(graph_.flib_def().Find("XTimesFour") != nullptr);
FunctionDefLibrary error_proto = proto;
*error_proto.mutable_function(0)->add_node_def() =
error_proto.function(0).node_def(0);
Status s = graph_.AddFunctionLibrary(error_proto);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
"Cannot add function 'XTimesTwo' because a different function with "
"the same name already exists.");
error_proto = proto;
error_proto.mutable_function(0)->mutable_signature()->set_name("Add");
s = graph_.AddFunctionLibrary(error_proto);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
"Cannot add function 'Add' because an op with the same name "
"already exists.");
GradientDef* grad = proto.add_gradient();
grad->set_function_name("XTimesTwo");
grad->set_gradient_func("Undefined");
TF_EXPECT_OK(graph_.AddFunctionLibrary(proto));
EXPECT_EQ(graph_.flib_def().FindGradient("XTimesTwo"), "Undefined");
TF_EXPECT_OK(graph_.AddFunctionLibrary(proto));
EXPECT_EQ(graph_.flib_def().FindGradient("XTimesTwo"), "Undefined");
error_proto = proto;
error_proto.mutable_gradient(0)->set_gradient_func("Undefined2");
s = graph_.AddFunctionLibrary(error_proto);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
"Cannot assign gradient function 'Undefined2' to 'XTimesTwo' "
"because it already has gradient function 'Undefined'");
}
TEST_F(GraphTest, BuildNodeNameIndex) {
FromGraphDef(
"node { name: 'A' op: 'OneOutput' }"
"node { name: 'B' op: 'OneInputTwoOutputs' input: [ 'A:0' ] }"
"node { name: 'C' op: 'NoOp' } ");
auto node_name_index = graph_.BuildNodeNameIndex();
EXPECT_EQ(node_name_index.size(), 5);
std::vector<string> node_names{"_SOURCE", "_SINK", "A", "B", "C"};
for (const string& node_name : node_names) {
EXPECT_NE(node_name_index.find(node_name), node_name_index.end());
EXPECT_EQ(node_name_index[node_name], FindNode(node_name));
}
}
TEST_F(GraphTest, Clear) {
const int num_nodes = 10;
const int num_edges_per_node = 2;
const GraphDef graph_def =
test::CreateGraphDef(num_nodes, num_edges_per_node);
const auto registry = OpRegistry::Global();
GraphConstructorOptions opts;
Graph graph(registry);
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
graph.Clear();
EXPECT_EQ(graph.num_nodes(), 2);
}
TEST_F(GraphTest, NodeFullType) {
FromNodeDef("A", "OneOutput", 0);
FullTypeDef node_t;
node_t.set_type_id(TFT_PRODUCT);
FullTypeDef* output_t = node_t.add_args();
output_t->set_type_id(TFT_TENSOR);
output_t->add_args()->set_type_id(TFT_FLOAT);
graph_.SetNodeType("A", node_t);
const FullTypeDef* ft;
graph_.NodeType("A", &ft);
EXPECT_NE(ft, nullptr);
EXPECT_EQ(ft->type_id(), TFT_PRODUCT);
}
TEST_F(GraphTest, NodeShrinkTypeOutput) {
auto builder = NodeDefBuilder("while", "While");
builder = builder.Input({NodeDefBuilder::NodeOut("node_0", 0, DT_FLOAT),
NodeDefBuilder::NodeOut("node_1", 0, DT_INT32),
NodeDefBuilder::NodeOut("node_2", 0, DT_INT64),
NodeDefBuilder::NodeOut("node_3", 0, DT_STRING)});
NodeDef node_def;
TF_CHECK_OK(builder.Finalize(&node_def));
Status s;
Node* node = graph_.AddNode(node_def, &s);
TF_CHECK_OK(s);
FullTypeDef node_t;
node_t.set_type_id(TFT_PRODUCT);
for (FullTypeId id : {TFT_FLOAT, TFT_INT32, TFT_INT64, TFT_STRING}) {
FullTypeDef* output_t = node_t.add_args();
output_t->set_type_id(TFT_TENSOR);
output_t->add_args()->set_type_id(id);
}
graph_.SetNodeType("while", node_t);
  TF_CHECK_OK(node->ShrinkTypeInfo({{1, 0}, {3, 1}}, "T",
                                   /*update_full_type=*/true));
std::vector<DataType> new_dtypes;
TF_CHECK_OK(GetNodeAttr(node->def(), "T", &new_dtypes));
ASSERT_EQ(new_dtypes.size(), 2);
EXPECT_EQ(new_dtypes[0], DT_INT32);
EXPECT_EQ(new_dtypes[1], DT_STRING);
const FullTypeDef* ft;
graph_.NodeType("while", &ft);
EXPECT_NE(ft, nullptr);
EXPECT_EQ(ft->type_id(), TFT_PRODUCT);
ASSERT_EQ(ft->args_size(), 2);
ASSERT_EQ(ft->args(0).args_size(), 1);
EXPECT_EQ(ft->args(0).args(0).type_id(), TFT_INT32);
ASSERT_EQ(ft->args(1).args_size(), 1);
EXPECT_EQ(ft->args(1).args(0).type_id(), TFT_STRING);
}
TEST_F(GraphTest, NodeShrinkTypeInput) {
auto builder = NodeDefBuilder("if", "If");
builder = builder.Input("cond", 0, DT_BOOL);
builder = builder.Input({NodeDefBuilder::NodeOut("node_0", 0, DT_FLOAT),
NodeDefBuilder::NodeOut("node_1", 0, DT_INT32),
NodeDefBuilder::NodeOut("node_2", 0, DT_INT64),
NodeDefBuilder::NodeOut("node_3", 0, DT_STRING)});
builder = builder.Attr("Tout", "[DT_FLOAT, DT_INT32, DT_INT63, DT_STRING]");
NodeDef node_def;
TF_CHECK_OK(builder.Finalize(&node_def));
Status s;
Node* node = graph_.AddNode(node_def, &s);
TF_CHECK_OK(s);
FullTypeDef node_t;
node_t.set_type_id(TFT_PRODUCT);
for (FullTypeId id : {TFT_FLOAT, TFT_INT32, TFT_INT64, TFT_STRING}) {
FullTypeDef* output_t = node_t.add_args();
output_t->set_type_id(TFT_TENSOR);
output_t->add_args()->set_type_id(id);
}
graph_.SetNodeType("if", node_t);
  TF_CHECK_OK(node->ShrinkTypeInfo({{1, 0}, {3, 1}}, "Tin",
                                   /*update_full_type=*/false));
std::vector<DataType> new_dtypes;
TF_CHECK_OK(GetNodeAttr(node->def(), "Tin", &new_dtypes));
ASSERT_EQ(new_dtypes.size(), 2);
EXPECT_EQ(new_dtypes[0], DT_INT32);
EXPECT_EQ(new_dtypes[1], DT_STRING);
const FullTypeDef* ft;
graph_.NodeType("if", &ft);
EXPECT_NE(ft, nullptr);
EXPECT_EQ(ft->type_id(), TFT_PRODUCT);
ASSERT_EQ(ft->args_size(), 4);
ASSERT_EQ(ft->args(0).args_size(), 1);
EXPECT_EQ(ft->args(0).args(0).type_id(), TFT_FLOAT);
ASSERT_EQ(ft->args(1).args_size(), 1);
EXPECT_EQ(ft->args(1).args(0).type_id(), TFT_INT32);
ASSERT_EQ(ft->args(2).args_size(), 1);
EXPECT_EQ(ft->args(2).args(0).type_id(), TFT_INT64);
ASSERT_EQ(ft->args(3).args_size(), 1);
EXPECT_EQ(ft->args(3).args(0).type_id(), TFT_STRING);
}
TEST(AddInput, AddsControlSlot) {
auto input_name = "input-name";
auto expected_input_name = absl::StrCat("^", input_name);
NodeDef node_def;
tensorflow::Graph::AddInput(&node_def, input_name, Graph::kControlSlot);
EXPECT_EQ(node_def.input(0), expected_input_name);
}
TEST(AddInput, AddsSourceSlotZero) {
auto input_name = "input-name";
NodeDef node_def;
tensorflow::Graph::AddInput(&node_def, input_name, 0);
EXPECT_EQ(node_def.input(0), input_name);
}
TEST(AddInput, AddsOtherSlots) {
auto input_name = "input-name";
int arbitrary_slot = 37;
auto expected_input_name =
absl::StrCat(input_name, ":", arbitrary_slot);
NodeDef node_def;
tensorflow::Graph::AddInput(&node_def, input_name, arbitrary_slot);
EXPECT_EQ(node_def.input(0), expected_input_name);
}
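// The benchmarks below sweep graph size (first argument: node count) and
// fan-in (second argument: edges per node) over the same parameter grid.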
void BM_InEdgeIteration(::testing::benchmark::State& state) {
const int num_nodes = state.range(0);
const int num_edges_per_node = state.range(1);
const GraphDef graph_def =
test::CreateGraphDef(num_nodes, num_edges_per_node);
Graph graph(OpRegistry::Global());
GraphConstructorOptions opts;
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
int64_t sum = 0;
for (auto s : state) {
for (const Node* node : graph.nodes()) {
for (auto e : node->in_edges()) {
sum += e->id();
}
}
}
VLOG(1) << sum;
}
BENCHMARK(BM_InEdgeIteration)->ArgPair(10, 2);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 6, 2);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 9, 2);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 12, 2);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 15, 2);
BENCHMARK(BM_InEdgeIteration)->ArgPair(10, 4);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 6, 4);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 9, 4);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 12, 4);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 15, 4);
BENCHMARK(BM_InEdgeIteration)->ArgPair(10, 8);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 6, 8);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 9, 8);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 12, 8);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 15, 8);
BENCHMARK(BM_InEdgeIteration)->ArgPair(10, 16);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 6, 16);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 9, 16);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 12, 16);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 15, 16);
void BM_GraphCreation(::testing::benchmark::State& state) {
const int num_nodes = state.range(0);
const int num_edges_per_node = state.range(1);
const GraphDef graph_def =
test::CreateGraphDef(num_nodes, num_edges_per_node);
const auto registry = OpRegistry::Global();
GraphConstructorOptions opts;
Graph graph(registry);
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
int64_t sum = 0;
for (auto s : state) {
Graph graph(registry);
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
sum += graph.num_node_ids();
}
VLOG(1) << sum;
}
BENCHMARK(BM_GraphCreation)->ArgPair(10, 2);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 6, 2);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 9, 2);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 12, 2);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 15, 2);
BENCHMARK(BM_GraphCreation)->ArgPair(10, 4);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 6, 4);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 9, 4);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 12, 4);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 15, 4);
BENCHMARK(BM_GraphCreation)->ArgPair(10, 8);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 6, 8);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 9, 8);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 12, 8);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 15, 8);
BENCHMARK(BM_GraphCreation)->ArgPair(10, 16);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 6, 16);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 9, 16);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 12, 16);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 15, 16);
void BM_ToGraphDef(::testing::benchmark::State& state) {
const int num_nodes = state.range(0);
const int num_edges_per_node = state.range(1);
const GraphDef graph_def =
test::CreateGraphDef(num_nodes, num_edges_per_node);
const auto registry = OpRegistry::Global();
GraphConstructorOptions opts;
Graph graph(registry);
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
int64_t sum = 0;
for (auto s : state) {
GraphDef graph_def;
graph.ToGraphDef(&graph_def);
sum += graph_def.node_size();
}
VLOG(1) << sum;
}
BENCHMARK(BM_ToGraphDef)->ArgPair(10, 2);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 6, 2);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 9, 2);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 12, 2);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 15, 2);
BENCHMARK(BM_ToGraphDef)->ArgPair(10, 4);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 6, 4);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 9, 4);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 12, 4);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 15, 4);
BENCHMARK(BM_ToGraphDef)->ArgPair(10, 8);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 6, 8);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 9, 8);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 12, 8);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 15, 8);
BENCHMARK(BM_ToGraphDef)->ArgPair(10, 16);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 6, 16);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 9, 16);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 12, 16);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 15, 16);
void BM_RemoveNode(::testing::benchmark::State& state) {
const int num_nodes = state.range(0);
const int num_edges_per_node = state.range(1);
const GraphDef graph_def =
test::CreateGraphDef(num_nodes, num_edges_per_node);
const auto registry = OpRegistry::Global();
GraphConstructorOptions opts;
for (auto s : state) {
state.PauseTiming();
Graph graph(registry);
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
state.ResumeTiming();
for (Node* n : graph.op_nodes()) {
graph.RemoveNode(n);
}
}
}
BENCHMARK(BM_RemoveNode)->ArgPair(10, 2);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 6, 2);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 9, 2);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 12, 2);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 15, 2);
BENCHMARK(BM_RemoveNode)->ArgPair(10, 4);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 6, 4);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 9, 4);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 12, 4);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 15, 4);
BENCHMARK(BM_RemoveNode)->ArgPair(10, 8);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 6, 8);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 9, 8);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 12, 8);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 15, 8);
BENCHMARK(BM_RemoveNode)->ArgPair(10, 16);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 6, 16);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 9, 16);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 12, 16);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 15, 16);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/graph.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/graph_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c6f622d4-32e5-476d-a55e-af17aa112a86 | cpp | google/tensorstore | index_array_slice_op | tensorstore/index_space/internal/index_array_slice_op.cc | tensorstore/index_space/index_array_slice_op_test.cc | #include "tensorstore/index_space/internal/index_array_slice_op.h"
#include <algorithm>
#include <numeric>
#include "tensorstore/index_space/dimension_identifier.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
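// NumPy-style broadcasting of a single extent: a size of 1 is compatible with
// any size; otherwise the two sizes must be equal.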
bool BroadcastSizes(Index source, Index* result) {
if (source == *result) return true;
if (*result == 1) {
*result = source;
return true;
} else if (source == 1) {
return true;
}
return false;
}
bool BroadcastShapes(span<const Index> source_shape, span<Index> result_shape) {
if (source_shape.size() != result_shape.size()) return false;
for (DimensionIndex i = 0; i < source_shape.size(); ++i) {
if (!BroadcastSizes(source_shape[i], &result_shape[i])) return false;
}
return true;
}
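// For example (illustrative values only), shapes {3, 1} and {1, 4} broadcast
// to the common shape {3, 4}, whereas {3, 2} and {3, 4} are incompatible.
//
// Shared builder for joint ("vectorized") index-array indexing: the leading
// `num_new_dims` input dimensions span the common broadcast shape, each
// selected output dimension receives an array output index map, and every
// remaining output dimension maps one-to-one onto a trailing input dimension.
// An explicitly empty domain degrades the array maps to constant maps.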
template <typename GetNewDimensionShapeFn, typename GetIndexArrayBasePointerFn,
typename GetIndexArrayByteStrideFn>
Result<TransformRep::Ptr<>> MakeTransformFromJointIndexArrays(
DimensionIndex num_new_dims, TransformRep* orig_transform,
DimensionIndexBuffer* dimensions,
GetNewDimensionShapeFn get_new_dimension_bounds,
GetIndexArrayBasePointerFn get_index_array_base_pointer,
GetIndexArrayByteStrideFn get_index_array_byte_stride) {
const DimensionIndex num_indexed_dims = dimensions->size();
const DimensionIndex output_rank = orig_transform->input_rank;
const DimensionIndex input_rank =
output_rank - dimensions->size() + num_new_dims;
TENSORSTORE_RETURN_IF_ERROR(ValidateRank(input_rank));
auto result = TransformRep::Allocate(input_rank, output_rank);
result->input_rank = input_rank;
result->output_rank = output_rank;
result->implicit_lower_bounds = false;
result->implicit_upper_bounds = false;
span<OutputIndexMap> maps = result->output_index_maps().first(output_rank);
const DimensionIndex num_preserved_dims = output_rank - num_indexed_dims;
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
maps[output_dim].SetSingleInputDimension(0);
}
const auto input_domain = result->input_domain(input_rank);
for (DimensionIndex new_dim = 0; new_dim < num_new_dims; ++new_dim) {
input_domain[new_dim] = get_new_dimension_bounds(new_dim);
}
for (DimensionIndex indexed_dim = 0; indexed_dim < num_indexed_dims;
++indexed_dim) {
const DimensionIndex output_dim = (*dimensions)[indexed_dim];
auto& map = maps[output_dim];
map.offset() = 0;
map.stride() = 1;
auto& index_array_data = map.SetArrayIndexing(input_rank);
std::fill_n(index_array_data.byte_strides + num_new_dims,
num_preserved_dims, 0);
for (DimensionIndex new_dim = 0; new_dim < num_new_dims; ++new_dim) {
index_array_data.byte_strides[new_dim] =
get_index_array_byte_stride(indexed_dim, new_dim);
}
index_array_data.element_pointer =
get_index_array_base_pointer(indexed_dim);
}
for (DimensionIndex output_dim = 0, input_dim = num_new_dims;
output_dim < output_rank; ++output_dim) {
auto& map = maps[output_dim];
if (map.method() != OutputIndexMethod::single_input_dimension) continue;
map.SetSingleInputDimension(input_dim);
map.offset() = 0;
map.stride() = 1;
result->input_dimension(input_dim) =
orig_transform->input_dimension(output_dim);
++input_dim;
}
if (IsDomainExplicitlyEmpty(result.get())) {
ReplaceAllIndexArrayMapsWithConstantMaps(result.get());
}
dimensions->resize(num_new_dims);
std::iota(dimensions->begin(), dimensions->end(),
static_cast<DimensionIndex>(0));
internal_index_space::DebugCheckInvariants(result.get());
return result;
}
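// Implements joint (NumPy "vectorized") indexing: all index arrays are
// broadcast to a common shape and together consume one shared set of new
// input dimensions.  Broadcast (size-1) dimensions get a byte stride of 0.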
Result<TransformRep::Ptr<>> MakeTransformFromIndexArrays(
TransformRep* orig_transform, DimensionIndexBuffer* dimensions,
span<const SharedArrayView<const Index>> index_arrays) {
const DimensionIndex num_indexed_dims = dimensions->size();
if (index_arrays.size() != num_indexed_dims) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Number of selected dimensions (", num_indexed_dims,
") does not equal number of index arrays (", index_arrays.size(), ")"));
}
if (index_arrays.empty()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("At least one index array must be specified"));
}
Index shape[kMaxRank];
const DimensionIndex num_new_dims = index_arrays[0].rank();
std::fill_n(&shape[0], num_new_dims, Index(1));
bool error = false;
for (DimensionIndex i = 0; i < index_arrays.size(); ++i) {
if (!BroadcastShapes(index_arrays[i].shape(),
span<Index>(&shape[0], num_new_dims))) {
error = true;
}
}
if (error) {
std::string shape_msg;
for (DimensionIndex i = 0; i < index_arrays.size(); ++i) {
tensorstore::StrAppend(&shape_msg, (shape_msg.empty() ? "" : ", "),
index_arrays[i].shape());
}
return absl::InvalidArgumentError(
tensorstore::StrCat("Index arrays with shapes ", shape_msg,
" cannot be broadcast to a common shape"));
}
const auto get_new_dimension_bounds = [&](DimensionIndex new_dim) {
return IndexInterval::UncheckedSized(0, shape[new_dim]);
};
const auto get_index_array_base_pointer = [&](DimensionIndex indexed_dim) {
return index_arrays[indexed_dim].pointer();
};
const auto get_index_array_byte_stride = [&](DimensionIndex indexed_dim,
DimensionIndex new_dim) {
return index_arrays[indexed_dim].shape()[new_dim] == 1
? 0
: index_arrays[indexed_dim].byte_strides()[new_dim];
};
return MakeTransformFromJointIndexArrays(
num_new_dims, orig_transform, dimensions, get_new_dimension_bounds,
get_index_array_base_pointer, get_index_array_byte_stride);
}
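// Implements outer indexing: each index array contributes its own block of
// new input dimensions, placed in order of increasing indexed output
// dimension.  Single-element index arrays degenerate to constant output
// maps.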
Result<TransformRep::Ptr<>> MakeTransformFromOuterIndexArrays(
TransformRep* orig_transform, DimensionIndexBuffer* dimensions,
span<const SharedArrayView<const Index>> index_arrays) {
const DimensionIndex num_indexed_dims = dimensions->size();
if (index_arrays.size() != num_indexed_dims) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Number of selected dimensions (", num_indexed_dims,
") does not equal number of index arrays (", index_arrays.size(), ")"));
}
const DimensionIndex output_rank = orig_transform->input_rank;
DimensionIndex input_rank = output_rank - num_indexed_dims;
for (const auto& index_array : index_arrays) {
input_rank += index_array.rank();
}
TENSORSTORE_RETURN_IF_ERROR(ValidateRank(input_rank));
auto result = TransformRep::Allocate(input_rank, output_rank);
result->input_rank = input_rank;
result->output_rank = output_rank;
result->implicit_lower_bounds = false;
result->implicit_upper_bounds = false;
DimensionIndex index_array_start_dim[kMaxRank];
DimensionIndex index_array_order[kMaxRank];
std::iota(&index_array_order[0], &index_array_order[num_indexed_dims],
static_cast<DimensionIndex>(0));
std::sort(&index_array_order[0], &index_array_order[num_indexed_dims],
[&](DimensionIndex a, DimensionIndex b) {
return (*dimensions)[a] < (*dimensions)[b];
});
span<Index> input_origin = result->input_origin().first(input_rank);
span<Index> input_shape = result->input_shape().first(input_rank);
span<OutputIndexMap> maps = result->output_index_maps().first(output_rank);
for (DimensionIndex output_dim = 0, reordered_indexed_dim = 0, input_dim = 0;
output_dim < output_rank; ++output_dim) {
auto& map = maps[output_dim];
map.stride() = 1;
map.offset() = 0;
if (reordered_indexed_dim < num_indexed_dims) {
const DimensionIndex indexed_dim =
index_array_order[reordered_indexed_dim];
if ((*dimensions)[indexed_dim] == output_dim) {
index_array_start_dim[indexed_dim] = input_dim;
const auto& array = index_arrays[indexed_dim];
MutableBoxView<>(input_origin.subspan(input_dim, array.rank()),
input_shape.subspan(input_dim, array.rank()))
.DeepAssign(array.domain());
const DimensionIndex end_input_dim = input_dim + array.rank();
if (array.num_elements() == 1) {
map.SetConstant();
map.offset() = *array.data();
map.stride() = 0;
} else {
auto& index_array_data = map.SetArrayIndexing(input_rank);
index_array_data.element_pointer = array.element_pointer();
std::fill_n(index_array_data.byte_strides, input_dim, 0);
std::copy(array.byte_strides().begin(), array.byte_strides().end(),
index_array_data.byte_strides + input_dim);
std::fill(index_array_data.byte_strides + end_input_dim,
index_array_data.byte_strides + input_rank, 0);
}
input_dim = end_input_dim;
++reordered_indexed_dim;
continue;
}
}
result->input_dimension(input_dim) =
orig_transform->input_dimension(output_dim);
map.SetSingleInputDimension(input_dim);
++input_dim;
}
if (IsDomainExplicitlyEmpty(result.get())) {
ReplaceAllIndexArrayMapsWithConstantMaps(result.get());
}
dimensions->clear();
dimensions->reserve(input_rank - output_rank);
for (DimensionIndex indexed_dim = 0; indexed_dim < num_indexed_dims;
++indexed_dim) {
const DimensionIndex start_input_dim = index_array_start_dim[indexed_dim];
for (DimensionIndex
input_dim = start_input_dim,
end_input_dim = start_input_dim + index_arrays[indexed_dim].rank();
input_dim != end_input_dim; ++input_dim) {
dimensions->push_back(input_dim);
}
}
internal_index_space::DebugCheckInvariants(result.get());
return result;
}
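// Implements joint indexing driven by a single index-vector array:
// dimension `vector_dimension` of `index_vector_array` must have extent
// equal to the number of selected dimensions, and each selected dimension
// is indexed by the corresponding slice of the array.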
Result<TransformRep::Ptr<>> MakeTransformFromIndexVectorArray(
TransformRep* orig_transform, DimensionIndexBuffer* dimensions,
DimensionIndex vector_dimension,
const SharedArrayView<const Index>& index_vector_array) {
TENSORSTORE_ASSIGN_OR_RETURN(
vector_dimension,
NormalizeDimensionIndex(vector_dimension, index_vector_array.rank()));
const DimensionIndex num_indexed_dims = dimensions->size();
if (index_vector_array.shape()[vector_dimension] != num_indexed_dims) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Number of selected dimensions (", num_indexed_dims,
") does not equal index vector length (",
index_vector_array.shape()[vector_dimension], ")"));
}
const DimensionIndex num_new_dims = index_vector_array.rank() - 1;
const auto get_index_vector_array_dim = [&](DimensionIndex new_dim) {
return new_dim >= vector_dimension ? new_dim + 1 : new_dim;
};
const auto get_new_dimension_bounds = [&](DimensionIndex new_dim) {
return index_vector_array.domain()[get_index_vector_array_dim(new_dim)];
};
const auto get_index_array_base_pointer = [&](DimensionIndex indexed_dim) {
return std::shared_ptr<const Index>(
index_vector_array.pointer(),
index_vector_array.byte_strided_pointer() +
index_vector_array.byte_strides()[vector_dimension] * indexed_dim);
};
const auto get_index_array_byte_stride = [&](DimensionIndex indexed_dim,
DimensionIndex new_dim) {
const DimensionIndex index_vector_array_dim =
get_index_vector_array_dim(new_dim);
return index_vector_array.shape()[index_vector_array_dim] == 1
? 0
: index_vector_array.byte_strides()[index_vector_array_dim];
};
return MakeTransformFromJointIndexArrays(
num_new_dims, orig_transform, dimensions, get_new_dimension_bounds,
get_index_array_base_pointer, get_index_array_byte_stride);
}
}
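// Composes `transform` with a transform constructed from the index arrays,
// using outer or joint indexing semantics according to `outer_indexing`.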
Result<IndexTransform<>> ApplyIndexArraySlice(
IndexTransform<> transform, DimensionIndexBuffer* dimensions,
span<const SharedArrayView<const Index>> index_arrays, bool outer_indexing,
bool domain_only) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto other_transform,
outer_indexing
? MakeTransformFromOuterIndexArrays(TransformAccess::rep(transform),
dimensions, index_arrays)
: MakeTransformFromIndexArrays(TransformAccess::rep(transform),
dimensions, index_arrays));
TENSORSTORE_ASSIGN_OR_RETURN(
auto new_rep,
ComposeTransforms(TransformAccess::rep(transform),
false, other_transform.get(),
true, domain_only));
return TransformAccess::Make<IndexTransform<>>(std::move(new_rep));
}
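// Same as ApplyIndexArraySlice, but with the index arrays packed into a
// single index-vector array along `vector_dimension`.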
Result<IndexTransform<>> ApplyIndexVectorArraySlice(
IndexTransform<> transform, DimensionIndexBuffer* dimensions,
DimensionIndex vector_dimension,
const SharedArrayView<const Index>& index_vector_array, bool domain_only) {
TENSORSTORE_ASSIGN_OR_RETURN(auto other_transform,
MakeTransformFromIndexVectorArray(
TransformAccess::rep(transform), dimensions,
vector_dimension, index_vector_array));
TENSORSTORE_ASSIGN_OR_RETURN(
auto new_rep,
ComposeTransforms(TransformAccess::rep(transform),
false, other_transform.get(),
true, domain_only));
return TransformAccess::Make<IndexTransform<>>(std::move(new_rep));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::AllDims;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::MakeArray;
using ::tensorstore::SharedArrayView;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::EquivalentIndices;
using ::tensorstore::internal_index_space::TestDimExpression;
using ::tensorstore::internal_index_space::TestDimExpressionError;
TEST(IndexArraySliceTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({0, 2, 0})
.input_shape({7, 4, 10})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform =
IndexTransformBuilder<3, 3>()
.input_origin({0, 0, 2})
.input_shape({2, 3, 4})
.input_labels({"", "", "y"})
.output_index_array(
0, 0, 1, MakeArray<Index>({{{1}, {2}, {3}}, {{4}, {5}, {6}}}),
IndexInterval::Sized(0, 7))
.output_single_input_dimension(1, 2)
.output_index_array(
2, 0, 1, MakeArray<Index>({{{7}, {8}, {9}}, {{0}, {1}, {2}}}),
IndexInterval::Sized(0, 10))
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{1, 3, 7}, {0, 0, 3}},
{{2, 3, 8}, {0, 1, 3}},
{{3, 3, 9}, {0, 2, 3}},
{{6, 3, 2}, {1, 2, 3}},
};
TestDimExpression(
original_transform,
Dims(0, 2).IndexArraySlice(MakeArray<Index>({{1, 2, 3}, {4, 5, 6}}),
MakeArray<Index>({{7, 8, 9}, {0, 1, 2}})),
{0, 1},
expected_new_transform,
expected_new_transform,
equivalent_indices,
false);
TestDimExpression(
original_transform,
Dims("x", "z").IndexArraySlice(MakeArray<Index>({{1, 2, 3}, {4, 5, 6}}),
MakeArray<Index>({{7, 8, 9}, {0, 1, 2}})),
{0, 1},
expected_new_transform,
expected_new_transform,
equivalent_indices,
false);
}
TEST(IndexVectorArraySliceTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({0, 2, 0})
.input_shape({7, 4, 10})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform =
IndexTransformBuilder<3, 3>()
.input_origin({0, 0, 2})
.input_shape({2, 3, 4})
.input_labels({"", "", "y"})
.output_index_array(
0, 0, 1, MakeArray<Index>({{{1}, {2}, {3}}, {{4}, {5}, {6}}}),
IndexInterval::Sized(0, 7))
.output_single_input_dimension(1, 2)
.output_index_array(
2, 0, 1, MakeArray<Index>({{{7}, {8}, {9}}, {{0}, {1}, {2}}}),
IndexInterval::Sized(0, 10))
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{2, 3, 8}, {0, 1, 3}},
{{6, 3, 2}, {1, 2, 3}},
};
TestDimExpression(original_transform,
Dims(0, 2).IndexVectorArraySlice(
MakeArray<Index>({{{1, 7}, {2, 8}, {3, 9}},
{{4, 0}, {5, 1}, {6, 2}}}),
-1),
{0, 1},
expected_new_transform,
expected_new_transform,
equivalent_indices,
false);
TestDimExpression(original_transform,
Dims("x", "z").IndexVectorArraySlice(
MakeArray<Index>({{{1, 7}, {2, 8}, {3, 9}},
{{4, 0}, {5, 1}, {6, 2}}}),
-1),
{0, 1},
expected_new_transform,
expected_new_transform,
equivalent_indices,
false);
}
TEST(IndexArrayOuterIndexArraySliceTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({4, 2, 0})
.input_shape({5, 4, 10})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform =
IndexTransformBuilder<4, 3>()
.input_origin({0, 2, 0, 0})
.input_shape({2, 4, 2, 2})
.input_labels({"", "y", "", ""})
.output_index_array(0, 0, 1, MakeArray<Index>({{{{6}}}, {{{7}}}}),
IndexInterval::Sized(4, 5))
.output_single_input_dimension(1, 1)
.output_index_array(2, 0, 1, MakeArray<Index>({{{{2, 3}, {4, 5}}}}),
IndexInterval::Sized(0, 10))
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{6, 3, 3}, {0, 3, 0, 1}},
{{7, 3, 4}, {1, 3, 1, 0}},
};
TestDimExpression(
original_transform,
Dims(2, 0).OuterIndexArraySlice(MakeArray<Index>({{2, 3}, {4, 5}}),
MakeArray<Index>({6, 7})),
{2, 3, 0},
expected_new_transform,
expected_new_transform,
equivalent_indices,
false);
TestDimExpression(
original_transform,
Dims("z", "x").OuterIndexArraySlice(MakeArray<Index>({{2, 3}, {4, 5}}),
MakeArray<Index>({6, 7})),
{2, 3, 0},
expected_new_transform,
expected_new_transform,
equivalent_indices,
false);
}
TEST(IndexArraySliceTest, OneDOutputOneDArray) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({-10, -100})
.input_shape({20, 200})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
Dims(0).IndexArraySlice(MakeArray<Index>({1, 2})),
{0},
IndexTransformBuilder<2, 2>()
.input_origin({0, -100})
.input_shape({2, 200})
.output_index_array(0, 0, 1, MakeArray<Index>({{1}, {2}}),
IndexInterval::Sized(-10, 20))
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({0, -100})
.input_shape({2, 200})
.output_index_array(0, -2, -3, MakeArray<Index>({{1}, {2}}),
IndexInterval::Sized(-10, 20))
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
{{{1, 5}, {0, 5}}, {{2, 5}, {1, 5}}},
false);
}
TEST(IndexArraySliceTest, ZeroElementIndexArray) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({-10, -100})
.input_shape({20, 200})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
Dims(0).IndexArraySlice(
tensorstore::AllocateArray<Index>({5, 0, 3})),
{0, 1, 2},
IndexTransformBuilder<4, 2>()
.input_origin({0, 0, 0, -100})
.input_shape({5, 0, 3, 200})
.output_constant(0, 0)
.output_single_input_dimension(1, 3)
.Finalize()
.value(),
IndexTransformBuilder<4, 2>()
.input_origin({0, 0, 0, -100})
.input_shape({5, 0, 3, 200})
.output_constant(0, -2)
.output_single_input_dimension(1, 10, 11, 3)
.Finalize()
.value(),
{},
false);
}
TEST(IndexArraySliceTest, OneElementIndexArray) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({-10, -100})
.input_shape({20, 200})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
Dims(0).IndexArraySlice(MakeArray<Index>({{5}})),
{0, 1},
IndexTransformBuilder<3, 2>()
.input_origin({0, 0, -100})
.input_shape({1, 1, 200})
.output_constant(0, 5)
.output_single_input_dimension(1, 2)
.Finalize()
.value(),
IndexTransformBuilder<3, 2>()
.input_origin({0, 0, -100})
.input_shape({1, 1, 200})
.output_constant(0, -17)
.output_single_input_dimension(1, 10, 11, 2)
.Finalize()
.value(),
{{{5, 6}, {0, 0, 6}}},
false);
}
TEST(IndexArraySliceTest, OneDOutputOneDArrayLabeled) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({-10, -100})
.input_shape({20, 200})
.input_labels({"x", "y"})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
Dims(0)
.IndexArraySlice(MakeArray<Index>({1, 2}))
.Label("index"),
{0},
IndexTransformBuilder<2, 2>()
.input_origin({0, -100})
.input_shape({2, 200})
.input_labels({"index", "y"})
.output_index_array(0, 0, 1,
MakeArray<Index>({{1}, {2}}),
IndexInterval::Sized(-10, 20))
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({0, -100})
.input_shape({2, 200})
.input_labels({"index", "y"})
.output_index_array(0, -2, -3,
MakeArray<Index>({{1}, {2}}),
IndexInterval::Sized(-10, 20))
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
{{{1, 5}, {0, 5}}, {{2, 5}, {1, 5}}},
false);
}
TEST(IndexArraySliceTest, TwoDOutputOneDArray) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({-10, -2})
.input_shape({20, 15})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, -4, 2, 1)
.Finalize()
.value(),
AllDims()
.IndexArraySlice(MakeArray<Index>({1, 2}),
MakeArray<Index>({3, 4}))
.Label("index"),
{0},
IndexTransformBuilder<1, 2>()
.input_origin({0})
.input_shape({2})
.input_labels({"index"})
.output_index_array(0, 0, 1, MakeArray<Index>({1, 2}),
IndexInterval::Sized(-10, 20))
.output_index_array(1, 0, 1, MakeArray<Index>({3, 4}),
IndexInterval::Sized(-2, 15))
.Finalize()
.value(),
IndexTransformBuilder<1, 2>()
.input_origin({0})
.input_shape({2})
.input_labels({"index"})
.output_index_array(0, -2, -3, MakeArray<Index>({1, 2}),
IndexInterval::Sized(-10, 20))
.output_index_array(1, -4, 2, MakeArray<Index>({3, 4}),
IndexInterval::Sized(-2, 15))
.Finalize()
.value(),
{{{1, 3}, {0}}, {{2, 4}, {1}}},
false);
}
TEST(IndexArraySliceTest, TwoDOutputOneDArrayBroadcast) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({-10, -2})
.input_shape({20, 15})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, -4, 2, 1)
.Finalize()
.value(),
AllDims().IndexArraySlice(MakeArray<Index>({{1, 2}}),
MakeArray<Index>({{3}, {4}})),
{0, 1},
IndexTransformBuilder<2, 2>()
.input_origin({0, 0})
.input_shape({2, 2})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2}}),
IndexInterval::Sized(-10, 20))
.output_index_array(1, 0, 1, MakeArray<Index>({{3}, {4}}),
IndexInterval::Sized(-2, 15))
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({0, 0})
.input_shape({2, 2})
.output_index_array(0, -2, -3, MakeArray<Index>({{1, 2}}),
IndexInterval::Sized(-10, 20))
.output_index_array(1, -4, 2, MakeArray<Index>({{3}, {4}}),
IndexInterval::Sized(-2, 15))
.Finalize()
.value(),
{{{1, 3}, {0, 0}}, {{1, 4}, {1, 0}}, {{2, 4}, {1, 1}}},
false);
}
TEST(IndexArraySliceTest, ErrorHandling) {
TestDimExpressionError(
IndexTransformBuilder<2, 0>().Finalize().value(),
Dims(span<const DimensionIndex>({0}))
.IndexArraySlice(MakeArray<Index>({1, 2}), MakeArray<Index>({3, 4})),
absl::StatusCode::kInvalidArgument,
"Number of selected dimensions \\(1\\) does not equal number of index "
"arrays \\(2\\)");
TestDimExpressionError(
IndexTransformBuilder<1, 0>().Finalize().value(),
Dims(span<const DimensionIndex>())
.IndexArraySlice(span<const SharedArrayView<const Index>>()),
absl::StatusCode::kInvalidArgument,
"At least one index array must be specified");
TestDimExpressionError(
IndexTransformBuilder<2, 0>().Finalize().value(),
Dims(0, 1).IndexArraySlice(MakeArray<Index>({1, 2}),
MakeArray<Index>({3, 4, 5})),
absl::StatusCode::kInvalidArgument,
"Index arrays with shapes \\{2\\}, \\{3\\} cannot be broadcast "
"to a common shape");
}
TEST(IndexArraySliceTest, InvalidRank) {
auto index_array = tensorstore::AllocateArray<Index>(
std::vector<Index>(32, 1), tensorstore::c_order, tensorstore::value_init);
TestDimExpressionError(tensorstore::IdentityTransform(2),
Dims(0).IndexArraySlice(index_array),
absl::StatusCode::kInvalidArgument,
"Rank 33 is outside valid range \\[0, 32\\]");
}
TEST(IndexVectorArraySliceTest, OneDOutputOneDArray) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({-10, -100})
.input_shape({20, 200})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
Dims(0).IndexVectorArraySlice(MakeArray<Index>({{1, 2}}),
0),
{0},
IndexTransformBuilder<2, 2>()
.input_origin({0, -100})
.input_shape({2, 200})
.output_index_array(0, 0, 1,
MakeArray<Index>({{1}, {2}}),
IndexInterval::Sized(-10, 20))
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({0, -100})
.input_shape({2, 200})
.output_index_array(0, -2, -3,
MakeArray<Index>({{1}, {2}}),
IndexInterval::Sized(-10, 20))
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
{{{1, 5}, {0, 5}}, {{2, 5}, {1, 5}}},
false);
}
TEST(IndexVectorArraySliceTest, OneElementIndexArray) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({-10, -100})
.input_shape({20, 200})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
Dims(0).IndexVectorArraySlice(MakeArray<Index>({{1}}), 0),
{0},
IndexTransformBuilder<2, 2>()
.input_origin({0, -100})
.input_shape({1, 200})
.output_constant(0, 1)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({0, -100})
.input_shape({1, 200})
.output_constant(0, -5)
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
{{{1, 5}, {0, 5}}},
false);
}
TEST(IndexVectorArraySliceTest, OneDOutputOneDArrayLabeled) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({-10, -100})
.input_shape({20, 200})
.input_labels({"x", "y"})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
Dims(0)
.IndexVectorArraySlice(MakeArray<Index>({{1, 2}}), 0)
.Label("index"),
{0},
IndexTransformBuilder<2, 2>()
.input_origin({0, -100})
.input_shape({2, 200})
.input_labels({"index", "y"})
.output_index_array(0, 0, 1, MakeArray<Index>({{1}, {2}}),
IndexInterval::Sized(-10, 20))
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({0, -100})
.input_shape({2, 200})
.input_labels({"index", "y"})
.output_index_array(0, -2, -3, MakeArray<Index>({{1}, {2}}),
IndexInterval::Sized(-10, 20))
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
{{{1, 5}, {0, 5}}, {{2, 5}, {1, 5}}},
false);
}
TEST(IndexVectorArraySliceTest, TwoDOutputOneDArray) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({-10, -2})
.input_shape({20, 15})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, -4, 2, 1)
.Finalize()
.value(),
AllDims()
.IndexVectorArraySlice(
MakeArray<Index>({{1, 3}, {2, 4}}), -1)
.Label("index"),
{0},
IndexTransformBuilder<1, 2>()
.input_origin({0})
.input_shape({2})
.input_labels({"index"})
.output_index_array(0, 0, 1, MakeArray<Index>({1, 2}),
IndexInterval::Sized(-10, 20))
.output_index_array(1, 0, 1, MakeArray<Index>({3, 4}),
IndexInterval::Sized(-2, 15))
.Finalize()
.value(),
IndexTransformBuilder<1, 2>()
.input_origin({0})
.input_shape({2})
.input_labels({"index"})
.output_index_array(0, -2, -3, MakeArray<Index>({1, 2}),
IndexInterval::Sized(-10, 20))
.output_index_array(1, -4, 2, MakeArray<Index>({3, 4}),
IndexInterval::Sized(-2, 15))
.Finalize()
.value(),
{{{1, 3}, {0}}, {{2, 4}, {1}}},
false);
}
TEST(IndexVectorArraySliceTest, ErrorHandling) {
TestDimExpressionError(
IndexTransformBuilder<2, 0>().Finalize().value(),
Dims(0).IndexVectorArraySlice(MakeArray<Index>({1, 2}), 0),
absl::StatusCode::kInvalidArgument,
"Number of selected dimensions \\(1\\) does not equal index vector "
"length \\(2\\)");
TestDimExpressionError(
IndexTransformBuilder<2, 0>().Finalize().value(),
Dims(0).IndexVectorArraySlice(MakeArray<Index>({1, 2}), 1),
absl::StatusCode::kInvalidArgument,
"Dimension index 1 is outside valid range \\[-1, 1\\)");
}
TEST(IndexVectorArraySliceTest, InvalidRank) {
TestDimExpressionError(
tensorstore::IdentityTransform(4),
Dims(0, 1).IndexVectorArraySlice(
tensorstore::AllocateArray<Index>({1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 2},
tensorstore::c_order,
tensorstore::default_init),
-1),
absl::StatusCode::kInvalidArgument,
"Rank 33 is outside valid range \\[0, 32\\]");
}
TEST(OuterIndexArraySliceTest, Integration) {
TestDimExpression(
IndexTransformBuilder<3, 3>()
.input_origin({-10, -100, -2})
.input_shape({21, 200, 15})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, 6, 5, 1)
.output_single_input_dimension(2, -4, 2, 2)
.Finalize()
.value(),
Dims(0, 2).OuterIndexArraySlice(
MakeArray<Index>({{3, 4, 5}, {8, 9, 10}}),
MakeArray<Index>({1, 2})),
{0, 1, 3},
IndexTransformBuilder<4, 3>()
.input_origin({0, 0, -100, 0})
.input_shape({2, 3, 200, 2})
.output_index_array(
0, 0, 1,
MakeArray<Index>({{{{3}}, {{4}}, {{5}}},
{{{8}}, {{9}}, {{10}}}}),
IndexInterval::Sized(-10, 21))
.output_single_input_dimension(1, 2)
.output_index_array(2, 0, 1,
MakeArray<Index>({{{{1, 2}}}}),
IndexInterval::Sized(-2, 15))
.Finalize()
.value(),
IndexTransformBuilder<4, 3>()
.input_origin({0, 0, -100, 0})
.input_shape({2, 3, 200, 2})
.output_index_array(
0, -2, -3,
MakeArray<Index>({{{{3}}, {{4}}, {{5}}},
{{{8}}, {{9}}, {{10}}}}),
IndexInterval::Sized(-10, 21))
.output_single_input_dimension(1, 6, 5, 2)
.output_index_array(2, -4, 2,
MakeArray<Index>({{{{1, 2}}}}),
IndexInterval::Sized(-2, 15))
.Finalize()
.value(),
{{{3, 5, 1}, {0, 0, 5, 0}},
{{9, 5, 2}, {1, 1, 5, 1}},
{{8, 5, 2}, {1, 0, 5, 1}},
{{10, 5, 2}, {1, 2, 5, 1}}},
false);
}
TEST(OuterIndexArraySliceTest, OneElementIndexArray) {
TestDimExpression(
IndexTransformBuilder<3, 3>()
.input_origin({-10, -100, -2})
.input_shape({21, 200, 15})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, 6, 5, 1)
.output_single_input_dimension(2, -4, 2, 2)
.Finalize()
.value(),
Dims(0, 2).OuterIndexArraySlice(
MakeArray<Index>({{3, 4, 5}, {8, 9, 10}}),
MakeArray<Index>({1})),
{0, 1, 3},
IndexTransformBuilder<4, 3>()
.input_origin({0, 0, -100, 0})
.input_shape({2, 3, 200, 1})
.output_index_array(
0, 0, 1,
MakeArray<Index>({{{{3}}, {{4}}, {{5}}},
{{{8}}, {{9}}, {{10}}}}),
IndexInterval::Sized(-10, 21))
.output_single_input_dimension(1, 2)
.output_constant(2, 1)
.Finalize()
.value(),
IndexTransformBuilder<4, 3>()
.input_origin({0, 0, -100, 0})
.input_shape({2, 3, 200, 1})
.output_index_array(
0, -2, -3,
MakeArray<Index>({{{{3}}, {{4}}, {{5}}},
{{{8}}, {{9}}, {{10}}}}),
IndexInterval::Sized(-10, 21))
.output_single_input_dimension(1, 6, 5, 2)
.output_constant(2, -2)
.Finalize()
.value(),
{{{3, 5, 1}, {0, 0, 5, 0}}},
false);
}
TEST(OuterIndexArraySliceTest, ErrorHandling) {
TestDimExpressionError(
IndexTransformBuilder<2, 0>().Finalize().value(),
Dims(span<const DimensionIndex>({0}))
.OuterIndexArraySlice(MakeArray<Index>({1, 2}),
MakeArray<Index>({3, 4})),
absl::StatusCode::kInvalidArgument,
"Number of selected dimensions \\(1\\) does not equal number of index "
"arrays \\(2\\)");
}
TEST(OuterIndexArraySliceTest, InvalidRank) {
auto index_array = tensorstore::AllocateArray<Index>(
std::vector<Index>(17, 1), tensorstore::c_order, tensorstore::value_init);
TestDimExpressionError(
tensorstore::IdentityTransform(2),
Dims(0, 1).OuterIndexArraySlice(index_array, index_array),
absl::StatusCode::kInvalidArgument,
"Rank 34 is outside valid range \\[0, 32\\]");
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/index_array_slice_op.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/index_array_slice_op_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
c5e4d7d2-b829-43c7-a717-d8e1a08b6a64 | cpp | google/arolla | meta | arolla/util/meta.h | arolla/util/meta_test.cc | #ifndef AROLLA_UTIL_META_H_
#define AROLLA_UTIL_META_H_
#include <cstddef>
#include <functional>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
namespace arolla::meta {
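// Compile-time tag that carries a type as a value, so generic lambdas can
// receive types as ordinary function arguments.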
template <class T>
struct type_ {
using type = T;
};
template <typename T>
using type = type_<T>;
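// A compile-time list of types, with head/tail/concat primitives defined
// below.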
template <class... TypeTraits>
struct type_list {
using tuple = std::tuple<TypeTraits...>;
};
template <class L>
struct head;
template <class T, class... Ts>
struct head<type_list<T, Ts...>> : type<T> {};
template <class L>
using head_t = typename head<L>::type;
template <class L>
struct tail;
template <class T, class... Ts>
struct tail<type_list<T, Ts...>> {
using type = type_list<Ts...>;
};
template <class L>
using tail_t = typename tail<L>::type;
template <class L1, class L2>
struct concat;
template <class... Ts1, class... Ts2>
struct concat<type_list<Ts1...>, type_list<Ts2...>> {
using type = type_list<Ts1..., Ts2...>;
};
template <class L1, class L2>
using concat_t = typename concat<L1, L2>::type;
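// Invokes `fn(type<T>())` for every `T` in the given type_list.
// Illustrative usage:
//   foreach_type<type_list<int, float>>([](auto t) {
//     using T = typename decltype(t)::type;
//     // ... use T ...
//   });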
template <typename Fn, typename... Ts>
ABSL_ATTRIBUTE_ALWAYS_INLINE inline constexpr void foreach_type(
type_list<Ts...>, Fn fn) {
(fn(type<Ts>()), ...);
}
template <typename TypeList, typename Fn>
ABSL_ATTRIBUTE_ALWAYS_INLINE inline constexpr void foreach_type(Fn fn) {
foreach_type(TypeList(), fn);
}
template <typename TypeList, typename T>
struct contains : std::false_type {};
template <typename T, typename... Ts>
struct contains<type_list<Ts...>, T>
: std::disjunction<std::is_same<T, Ts>...> {};
template <typename TypeList, typename T>
constexpr bool contains_v = contains<TypeList, T>::value;
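// Deduces the arity, argument types (as a type_list), and return type of a
// functor, lambda, function pointer, or std::reference_wrapper thereof.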
template <typename T>
struct function_traits : public function_traits<decltype(&T::operator())> {};
template <typename CLS, typename RES, typename... ARGs>
struct function_traits<RES (CLS::*)(ARGs...) const> {
static constexpr int arity = sizeof...(ARGs);
using arg_types = type_list<ARGs...>;
using return_type = RES;
};
template <typename CLS, typename RES, typename... ARGs>
struct function_traits<RES (CLS::*)(ARGs...)> {
static constexpr int arity = sizeof...(ARGs);
using arg_types = type_list<ARGs...>;
using return_type = RES;
};
template <typename RES, typename... ARGs>
struct function_traits<RES (*)(ARGs...)> {
static constexpr int arity = sizeof...(ARGs);
using arg_types = type_list<ARGs...>;
using return_type = RES;
};
template <typename F>
struct function_traits<std::reference_wrapper<F>> : function_traits<F> {};
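// Detects whether `T` is an instantiation `Wrapper<U>` of the given
// single-parameter class template.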
template <template <typename> class Wrapper, class T>
struct is_wrapped_with : public std::false_type {};
template <template <typename> class Wrapper, class T>
struct is_wrapped_with<Wrapper, Wrapper<T>> : public std::true_type {};
template <template <typename> class Wrapper, class T>
constexpr bool is_wrapped_with_v = is_wrapped_with<Wrapper, T>::value;
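// Removes one `Wrapper<...>` layer from `T` if present; otherwise yields
// `T` unchanged.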
template <template <typename> class Wrapper, class T>
struct strip_template {
using type = T;
};
template <template <typename> class Wrapper, class T>
struct strip_template<Wrapper, Wrapper<T>> {
using type = T;
};
template <template <typename> class Wrapper, class T>
using strip_template_t = typename strip_template<Wrapper, T>::type;
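// Detects whether `T` has a static `Create` method invocable with
// `Args...`.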
template <typename T, typename ArgsTypeList, class = void>
struct has_create_method_impl : std::false_type {};
template <class T, class... Args>
struct has_create_method_impl<
T, type_list<Args...>,
std::void_t<decltype(T::Create(std::declval<Args>()...))>>
: std::true_type {};
template <class T, class... Args>
struct has_create_method
: public has_create_method_impl<T, type_list<Args...>> {};
template <class T, class... Args>
constexpr bool has_create_method_v = has_create_method<T, Args...>::value;
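// Invokes `fn` on each element of a tuple, in order.  The `_type` variant
// below instead passes `type<ElementType>()` tags and needs no tuple value.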
template <typename Tuple, typename Fn, size_t... Is>
ABSL_ATTRIBUTE_ALWAYS_INLINE inline void foreach_tuple_element_impl(
ABSL_ATTRIBUTE_UNUSED Tuple&& tuple, ABSL_ATTRIBUTE_UNUSED Fn&& fn,
std::index_sequence<Is...>) {
(fn(std::get<Is>(std::forward<Tuple>(tuple))), ...);
}
template <typename Tuple, typename Fn>
ABSL_ATTRIBUTE_ALWAYS_INLINE inline void foreach_tuple_element(Tuple&& tuple,
Fn&& fn) {
static_assert(std::tuple_size_v<std::decay_t<Tuple>> >= 0,
"expected a tuple");
foreach_tuple_element_impl(
std::forward<Tuple>(tuple), std::forward<Fn>(fn),
std::make_index_sequence<std::tuple_size_v<std::decay_t<Tuple>>>());
}
template <typename Tuple, typename Fn, size_t... Is>
void foreach_tuple_element_type_impl(ABSL_ATTRIBUTE_UNUSED Fn&& fn,
std::index_sequence<Is...>) {
(fn(::arolla::meta::type<std::tuple_element_t<Is, Tuple>>()), ...);
}
template <typename Tuple, typename Fn>
void foreach_tuple_element_type(Fn fn) {
static_assert(std::tuple_size_v<Tuple> >= 0, "expected a tuple");
foreach_tuple_element_type_impl<Tuple>(
std::forward<Fn>(fn),
std::make_index_sequence<std::tuple_size_v<Tuple>>());
}
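// Detects the `is_transparent` member typedef used by heterogeneous-lookup
// comparators and hashers (e.g. std::equal_to<>).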
template <class, class = void>
struct is_transparent : std::false_type {};
template <class T>
struct is_transparent<T, std::void_t<typename T::is_transparent>>
: std::true_type {};
template <typename T>
constexpr bool is_transparent_v = is_transparent<T>::value;
}
#endif | #include "arolla/util/meta.h"
#include <cstdint>
#include <functional>
#include <optional>
#include <tuple>
#include <type_traits>
#include <typeindex>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/statusor.h"
namespace arolla::meta {
namespace {
using ::testing::ElementsAre;
TEST(MetaTest, TypeList) {
static_assert(
std::is_same_v<head_t<type_list<int64_t, int32_t, int8_t>>, int64_t>);
static_assert(std::is_same_v<tail_t<type_list<int64_t, int32_t, int8_t>>,
type_list<int32_t, int8_t>>);
static_assert(contains_v<type_list<int64_t, int32_t, int8_t>, int32_t>);
static_assert(
contains_v<type_list<int64_t, int32_t, int32_t, int8_t>, int32_t>);
static_assert(!contains_v<type_list<int64_t, int32_t, int8_t>, float>);
static_assert(!contains_v<type_list<>, float>);
}
TEST(MetaTest, ForeachType) {
using meta_int_list =
type_list<std::integral_constant<int, 1>, std::integral_constant<int, 2>,
std::integral_constant<int, 4>, std::integral_constant<int, 8>>;
int value = 0;
foreach_type<meta_int_list>(
[&value](auto meta_type) { value ^= decltype(meta_type)::type::value; });
EXPECT_EQ(value, 15);
}
TEST(MetaTest, FunctionTraits) {
int64_t someValue = 57;
auto lambda = [&someValue](int32_t, double) {
return static_cast<float>(someValue);
};
using lambda_traits = function_traits<decltype(lambda)>;
static_assert(lambda_traits::arity == 2);
static_assert(std::is_same<typename lambda_traits::arg_types,
type_list<int32_t, double>>::value);
static_assert(
std::is_same<typename lambda_traits::return_type, float>::value);
struct ConstFunctor {
float operator()(int32_t, double) const { return 57; }
};
using const_functor_traits = function_traits<ConstFunctor>;
static_assert(const_functor_traits::arity == 2);
static_assert(std::is_same<typename const_functor_traits::arg_types,
type_list<int32_t, double>>::value);
static_assert(
std::is_same<typename const_functor_traits::return_type, float>::value);
struct NonConstFunctor {
float operator()(int32_t, double) { return 57; }
};
using non_const_functor_traits = function_traits<NonConstFunctor>;
static_assert(non_const_functor_traits::arity == 2);
static_assert(std::is_same<typename non_const_functor_traits::arg_types,
type_list<int32_t, double>>::value);
static_assert(std::is_same<typename non_const_functor_traits::return_type,
float>::value);
using ref_traits = function_traits<std::reference_wrapper<ConstFunctor>>;
static_assert(std::is_same<typename ref_traits::return_type, float>::value);
}
TEST(MetaTest, is_wrapped_with) {
static_assert(is_wrapped_with<std::optional, std::optional<float>>::value);
static_assert(
is_wrapped_with<::absl::StatusOr, ::absl::StatusOr<float>>::value);
}
TEST(MetaTest, concat) {
static_assert(
std::is_same_v<concat_t<type_list<>, type_list<>>, type_list<>>);
static_assert(
std::is_same_v<concat_t<type_list<>, type_list<void>>, type_list<void>>);
static_assert(
std::is_same_v<concat_t<type_list<int>, type_list<>>, type_list<int>>);
static_assert(std::is_same_v<concat_t<type_list<int>, type_list<void>>,
type_list<int, void>>);
static_assert(std::is_same_v<
concat_t<type_list<int, char, bool>, type_list<void, char>>,
type_list<int, char, bool, void, char>>);
}
TEST(MetaTest, has_create_method) {
struct A {
explicit A(int v) : value(v) {}
int value;
};
struct B {
static B Create(int v) { return B{.value = v}; }
int value;
};
static_assert(!has_create_method_v<A, int>);
static_assert(!has_create_method_v<B>);
static_assert(!has_create_method_v<B, char*>);
static_assert(has_create_method_v<B, int>);
static_assert(has_create_method_v<B, int16_t>);
}
TEST(MetaTest, foreach_tuple_element) {
{
auto tuple = std::tuple();
meta::foreach_tuple_element(tuple, [](auto&) { FAIL(); });
}
{
std::vector<const void*> result;
auto tuple = std::tuple(1, 2.5, "foo");
meta::foreach_tuple_element(tuple, [&](auto& x) { result.push_back(&x); });
EXPECT_THAT(result, ElementsAre(&std::get<0>(tuple), &std::get<1>(tuple),
&std::get<2>(tuple)));
}
}
TEST(MetaTest, foreach_tuple_element_type) {
{
using Tuple = std::tuple<>;
meta::foreach_tuple_element_type<Tuple>([](auto) { FAIL(); });
}
{
using Tuple = std::tuple<int, float, const char*>;
std::vector<std::type_index> result;
meta::foreach_tuple_element_type<Tuple>([&](auto meta_type) {
result.push_back(typeid(typename decltype(meta_type)::type));
});
EXPECT_THAT(result, ElementsAre(std::type_index(typeid(int)),
std::type_index(typeid(float)),
std::type_index(typeid(const char*))));
}
}
TEST(MetaTest, is_transparent) {
EXPECT_TRUE(is_transparent_v<std::equal_to<>>);
EXPECT_FALSE(is_transparent_v<std::equal_to<int>>);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/meta.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/meta_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
e0e3962d-5ba1-46ae-a510-9b91e916ddc4 | cpp | google/arolla | map | arolla/util/map.h | arolla/util/map_test.cc | #ifndef AROLLA_UTIL_MAP_H_
#define AROLLA_UTIL_MAP_H_
#include <algorithm>
#include <vector>
namespace arolla {
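// Returns the keys of `map` in sorted order.  Illustrative usage:
//   absl::flat_hash_map<std::string, int> m = {{"b", 1}, {"a", 2}};
//   std::vector<std::string> keys = SortedMapKeys(m);  // {"a", "b"}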
template <class Map>
std::vector<typename Map::key_type> SortedMapKeys(const Map& map) {
std::vector<typename Map::key_type> result;
result.reserve(map.size());
for (const auto& item : map) {
result.push_back(item.first);
}
std::sort(result.begin(), result.end());
return result;
}
}
#endif | #include "arolla/util/map.h"
#include <string>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
namespace arolla {
namespace {
using ::testing::ElementsAre;
using ::testing::TypedEq;
struct V {};
TEST(SortedMapKeys, small) {
EXPECT_THAT(SortedMapKeys(absl::flat_hash_map<int, V>{}), ElementsAre());
EXPECT_THAT(SortedMapKeys(absl::flat_hash_map<char, V>{{0, V{}}}),
ElementsAre(TypedEq<char>(0)));
EXPECT_THAT(SortedMapKeys(
absl::flat_hash_map<std::string, V>{{"1", V{}}, {"0", V{}}}),
ElementsAre("0", "1"));
}
TEST(SortedMapKeys, big) {
absl::flat_hash_map<std::string, V> m;
for (int i = 1000; i != 10000; ++i) {
m.emplace(std::to_string(i), V{});
}
std::vector<std::string> keys = SortedMapKeys(m);
EXPECT_EQ(keys.size(), 9000);
auto it = keys.begin();
for (int i = 1000; i != 10000; ++i, ++it) {
EXPECT_EQ(*it, std::to_string(i));
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/map.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/map_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
692904ef-2ba2-4fdc-9174-f6b95754dfd7 | cpp | tensorflow/tensorflow | p2p_schedule_preparation | third_party/xla/xla/service/p2p_schedule_preparation.cc | third_party/xla/xla/service/p2p_schedule_preparation_test.cc | #include "xla/service/p2p_schedule_preparation.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_reachability.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
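// Returns true if `op` is a non-host point-to-point
// Send/Recv/SendDone/RecvDone instruction.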
bool IsP2POp(const HloInstruction* op) {
auto p2p = DynCast<HloSendRecvInstruction>(op);
return p2p != nullptr && !p2p->is_host_transfer();
}
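// Returns true if `op` counts as a collective communication op for this
// pass.  Custom-calls are conservatively treated as collectives;
// asynchronous collective start ops are excluded because only their
// matching done ops need ordering.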
bool IsCollectiveOp(const HloInstruction* op) {
HloOpcode opcode = op->opcode();
if (opcode == HloOpcode::kCustomCall) {
return true;
}
return hlo_query::IsAsyncCollectiveDoneOp(op, true) ||
(hlo_query::IsCollectiveCommunicationOp(opcode) &&
!hlo_query::IsAsyncCollectiveStartOp(op, true));
}
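// Maps an asynchronous done op to its corresponding start op; returns `op`
// itself for anything else.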
HloInstruction* GetStartOpForDoneOp(HloInstruction* op) {
switch (op->opcode()) {
case HloOpcode::kAllReduceDone:
case HloOpcode::kAllGatherDone:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kSendDone:
case HloOpcode::kRecvDone:
return op->mutable_operand(0);
default:
return op;
}
}
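// Classification of a P2P group: an unpipelined group has its whole
// Send/Recv chain in one computation; a pipelined group is rotated through
// a while loop, with one chain in the loop body (the "child") and one in
// the loop's parent computation.  Unrecognized groups are left untouched.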
enum P2PGroupKind { kUnpipelined = 0, kPipelined = 1, kUnrecognized = 2 };
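// Runtime stream assignment of a chain, decoded from the
// kSendRecvPipelineAttr frontend attribute ("0" or "1") on Send/Recv.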
enum P2PRuntimeStream { kUnknown = 0, kStream0 = 1, kStream1 = 2 };
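// Records the four ops (Recv, Send, RecvDone, SendDone) of one channel
// within a single computation, plus the enclosing while op when the node is
// the parent of a pipelined group.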
struct P2PGroupNode {
bool RecordParentComputation(HloComputation* parent) {
if (computation == nullptr) {
computation = parent;
return true;
}
return computation == parent;
}
bool RecordP2POp(HloSendRecvInstruction* p2p) {
if (!RecordParentComputation(p2p->parent())) {
return false;
}
switch (p2p->opcode()) {
case HloOpcode::kRecvDone:
if (recv_done == nullptr) {
recv_done = Cast<HloRecvDoneInstruction>(p2p);
return true;
}
break;
case HloOpcode::kSendDone:
if (send_done == nullptr) {
send_done = Cast<HloSendDoneInstruction>(p2p);
return true;
}
break;
case HloOpcode::kRecv:
if (recv == nullptr) {
recv = Cast<HloRecvInstruction>(p2p);
return true;
}
break;
case HloOpcode::kSend:
if (send == nullptr) {
send = Cast<HloSendInstruction>(p2p);
return true;
}
break;
default:
break;
}
return false;
}
bool RecordWhileOp(HloInstruction* while_op) {
if (while_loop != nullptr) {
return false;
}
if (!RecordParentComputation(while_op->parent())) {
return false;
}
while_loop = while_op;
return true;
}
bool Incomplete() const {
return recv_done == nullptr || send_done == nullptr || recv == nullptr ||
send == nullptr;
}
bool IncompletePipelinedParent() const {
return Incomplete() || while_loop == nullptr;
}
P2PRuntimeStream GetRuntimeStream(const HloInstruction* start) const {
auto it = start->frontend_attributes().map().find(kSendRecvPipelineAttr);
if (it != start->frontend_attributes().map().end()) {
if (it->second == "0") {
return kStream0;
}
if (it->second == "1") {
return kStream1;
}
}
return kUnknown;
}
P2PRuntimeStream GetRuntimeStream() const {
P2PRuntimeStream send_stream = GetRuntimeStream(send);
P2PRuntimeStream recv_stream = GetRuntimeStream(recv);
if (send_stream != recv_stream) {
return kUnknown;
}
return send_stream;
}
int64_t GetChannel() const { return recv->channel_id().value(); }
HloRecvDoneInstruction* recv_done = nullptr;
HloSendDoneInstruction* send_done = nullptr;
HloRecvInstruction* recv = nullptr;
HloSendInstruction* send = nullptr;
HloComputation* computation = nullptr;
HloInstruction* while_loop = nullptr;
};
struct P2PGroup;
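// P2PGroupMap maps a channel id to its group; P2PInComputation maps a
// computation to the channel ids it contains; CollectiveInComputation maps
// a computation to whether it (transitively) invokes a collective op.
// ChainStartEnd is the first and last instruction of a decorated chain.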
using P2PGroupMap = absl::flat_hash_map<int64_t, P2PGroup>;
using P2PInComputation =
absl::flat_hash_map<const HloComputation*, std::set<int64_t>>;
using CollectiveInComputation =
absl::flat_hash_map<const HloComputation*, bool>;
using ChainStartEnd =
std::pair<HloSendRecvInstruction*, HloSendRecvInstruction*>;
static constexpr int kUnpipelinedNodeIdx = 0;
static constexpr int kPipelinedChildNodeIdx = 0;
static constexpr int kPipelinedParentNodeIdx = 1;
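// Represents the Send/Recv chain(s) of one channel.  A pipelined group has
// a child node (inside the while body) and a parent node; two groups on
// complementary runtime streams may form a cycle, recorded via
// complement_group_channel.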
struct P2PGroup {
absl::Status RecordP2POpForUnpipelinedGroup(HloSendRecvInstruction* p2p) {
if (kind == kUnrecognized) {
return absl::OkStatus();
}
if (kind != kUnpipelined) {
return Internal("Expected unpipelined group");
}
P2PGroupNode& node = nodes[kUnpipelinedNodeIdx];
if (!node.RecordP2POp(p2p)) {
kind = kUnrecognized;
}
return absl::OkStatus();
}
absl::Status RecordP2POpForPipelinedGroup(HloSendRecvInstruction* p2p) {
if (kind == kUnrecognized) {
return absl::OkStatus();
}
if (kind == kUnpipelined) {
if (nodes[kPipelinedParentNodeIdx].computation != nullptr) {
return Internal("Expected unpipelined group");
}
kind = kPipelined;
}
P2PGroupNode& node = nodes[kPipelinedParentNodeIdx];
if (!node.RecordP2POp(p2p)) {
kind = kUnrecognized;
}
return absl::OkStatus();
}
absl::Status RecordWhileOpToPipelinedGroup(HloInstruction* while_op) {
if (kind == kUnrecognized) {
return absl::OkStatus();
}
if (kind == kUnpipelined) {
return Internal("Expected pipelined group");
}
P2PGroupNode& node = nodes[kPipelinedParentNodeIdx];
if (!node.RecordWhileOp(while_op)) {
kind = kUnrecognized;
}
return absl::OkStatus();
}
bool RecordRuntimeStream() {
P2PRuntimeStream child_stream =
nodes[kPipelinedChildNodeIdx].GetRuntimeStream();
if (kind == kPipelined) {
P2PRuntimeStream parent_stream =
nodes[kPipelinedParentNodeIdx].GetRuntimeStream();
if (child_stream != parent_stream || child_stream == kUnknown) {
return false;
}
}
runtime_stream = child_stream;
return true;
}
absl::Status RecordComplementGroup(P2PGroupMap& p2p_group_map) {
CHECK(!complement_group_channel.has_value() && runtime_stream == kStream1);
for (auto& [channel, p2p_group] : p2p_group_map) {
if (&p2p_group == this ||
p2p_group.ChildComputation() != ChildComputation()) {
continue;
}
if (p2p_group.kind == kPipelined &&
p2p_group.ParentComputation() == ParentComputation()) {
if (p2p_group.runtime_stream != kStream0) {
return Internal(
"Expected different pipeline stream for complement group");
}
complement_group_channel = channel;
p2p_group.complement_group_channel = GetChannel();
} else if (p2p_group.kind == kUnpipelined &&
p2p_group.runtime_stream == kStream0) {
complement_group_channel = channel;
p2p_group.complement_group_channel = GetChannel();
}
}
return absl::OkStatus();
}
HloComputation* ParentComputation() const { return GetParent().computation; }
HloComputation* ChildComputation() const { return GetChild().computation; }
int64_t GetChannel() const { return nodes[kUnpipelinedNodeIdx].GetChannel(); }
P2PGroupNode& GetChild() { return nodes[kPipelinedChildNodeIdx]; }
P2PGroupNode& GetParent() { return nodes[kPipelinedParentNodeIdx]; }
const P2PGroupNode& GetChild() const { return nodes[kPipelinedChildNodeIdx]; }
const P2PGroupNode& GetParent() const {
return nodes[kPipelinedParentNodeIdx];
}
ChainStartEnd GetChainStartEnd(const HloComputation* computation,
const P2PGroupMap& p2p_group_map) const {
if (computation == ChildComputation()) {
if (!InCycle()) {
return std::make_pair(GetChild().recv, GetChild().send_done);
}
if (runtime_stream == kStream1) {
return std::make_pair(
GetComplementGroup(p2p_group_map)->GetChild().recv,
GetChild().send_done);
}
return std::make_pair(
GetChild().recv,
GetComplementGroup(p2p_group_map)->GetChild().send_done);
}
CHECK(kind == kPipelined && computation == ParentComputation());
if (!InCycle()) {
return std::make_pair(GetParent().recv, GetParent().send_done);
}
if (runtime_stream == kStream1) {
return std::make_pair(GetComplementGroup(p2p_group_map)->GetParent().recv,
GetParent().send_done);
}
return std::make_pair(
GetParent().recv,
GetComplementGroup(p2p_group_map)->GetParent().send_done);
}
HloInstruction* GetWhileOp() const {
return nodes[kPipelinedParentNodeIdx].while_loop;
}
bool InCycle() const { return complement_group_channel.has_value(); }
P2PGroup* GetComplementGroup(P2PGroupMap& p2p_group_map) const {
CHECK(InCycle());
return &p2p_group_map.at(*complement_group_channel);
}
const P2PGroup* GetComplementGroup(const P2PGroupMap& p2p_group_map) const {
CHECK(InCycle());
return &p2p_group_map.at(*complement_group_channel);
}
P2PGroupKind kind = kUnpipelined;
P2PGroupNode nodes[2];
P2PRuntimeStream runtime_stream = kUnknown;
std::optional<int64_t> complement_group_channel = std::nullopt;
};
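// Returns true if `hlo` is itself a collective op or calls a computation
// known to contain one.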
bool MayInvokeCollectiveOp(
const HloInstruction* hlo,
const CollectiveInComputation& collective_in_computation) {
if (IsCollectiveOp(hlo)) {
return true;
}
for (auto callee : hlo->called_computations()) {
auto collective_in_comp = collective_in_computation.find(callee);
if (collective_in_comp != collective_in_computation.end() &&
collective_in_comp->second) {
return true;
}
}
return false;
}
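// If the while-init tuple of `while_op` carries SendDone results whose
// channels have pipelined groups in the loop body, records `while_op` with
// those groups.  At most two pipelined groups (one cycle) are allowed per
// while loop.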
absl::Status MayAddWhileOpToPipelinedGroup(HloInstruction* while_op,
P2PInComputation& p2p_in_computation,
P2PGroupMap& p2p_group_map) {
if (while_op->while_init()->opcode() != HloOpcode::kTuple) {
return absl::OkStatus();
}
HloComputation* body = while_op->called_computations()[0];
auto p2p_in_while = p2p_in_computation.find(body);
if (p2p_in_while == p2p_in_computation.end()) {
return absl::OkStatus();
}
int pipelined_group = 0;
for (auto hlo : while_op->while_init()->operands()) {
if (hlo->opcode() != HloOpcode::kSendDone) {
continue;
}
int64_t channel_id = hlo->channel_id().value();
if (p2p_in_while->second.find(channel_id) == p2p_in_while->second.end()) {
continue;
}
auto group = p2p_group_map.find(channel_id);
if (group == p2p_group_map.end() || group->second.kind != kPipelined) {
continue;
}
pipelined_group++;
if (pipelined_group > 2) {
return Internal(
"Expecting up to two pipelined P2P groups for each while-loop");
}
TF_RETURN_IF_ERROR(group->second.RecordWhileOpToPipelinedGroup(while_op));
}
return absl::OkStatus();
}
absl::Status OrderBefore(HloInstruction* i1, HloInstruction* i2) {
TF_RETURN_IF_ERROR(i1->AddControlDependencyTo(i2));
VLOG(10) << "Add control predecessor " << i2->ToString();
return absl::OkStatus();
}
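// Adds control dependencies enforcing the order
//   Recv => Send => RecvDone => SendDone
// within a single chain.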
absl::Status ConnectP2P1NodeChain(const P2PGroupNode& node) {
HloRecvDoneInstruction* recv_done = node.recv_done;
HloRecvInstruction* recv = node.recv;
HloSendDoneInstruction* send_done = node.send_done;
HloSendInstruction* send = node.send;
TF_RETURN_IF_ERROR(OrderBefore(recv, send));
TF_RETURN_IF_ERROR(OrderBefore(send, recv_done));
TF_RETURN_IF_ERROR(OrderBefore(recv_done, send_done));
return absl::OkStatus();
}
absl::Status ConnectUnpipelinedP2P(const P2PGroup& p2p_group) {
return ConnectP2P1NodeChain(p2p_group.GetChild());
}
absl::Status ConnectPipelined1P2PChild(const P2PGroup& p2p_group) {
return ConnectP2P1NodeChain(p2p_group.GetChild());
}
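// Adds control dependencies ordering two chains that form a cycle:
//   Recv0 => Send0 => Recv1 => Send1 =>
//   RecvDone0 => RecvDone1 => SendDone0 => SendDone1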
absl::Status ConnectP2P2NodeChain(const P2PGroupNode& node0,
const P2PGroupNode& node1) {
HloSendRecvInstruction* recv_done0 = node0.recv_done;
HloRecvInstruction* recv0 = node0.recv;
HloSendRecvInstruction* send_done0 = node0.send_done;
HloSendInstruction* send0 = node0.send;
HloSendRecvInstruction* recv_done1 = node1.recv_done;
HloRecvInstruction* recv1 = node1.recv;
HloSendRecvInstruction* send_done1 = node1.send_done;
HloSendInstruction* send1 = node1.send;
TF_RETURN_IF_ERROR(OrderBefore(recv_done0, recv_done1));
TF_RETURN_IF_ERROR(OrderBefore(recv_done1, send_done0));
TF_RETURN_IF_ERROR(OrderBefore(send_done0, send_done1));
TF_RETURN_IF_ERROR(OrderBefore(recv0, send0));
TF_RETURN_IF_ERROR(OrderBefore(send0, recv1));
TF_RETURN_IF_ERROR(OrderBefore(recv1, send1));
TF_RETURN_IF_ERROR(OrderBefore(send1, recv_done0));
return absl::OkStatus();
}
absl::Status ConnectPipelined2P2PChild(const P2PGroup& p2p_group,
const P2PGroupMap& p2p_group_map) {
return ConnectP2P2NodeChain(
p2p_group.GetComplementGroup(p2p_group_map)->GetChild(),
p2p_group.GetChild());
}
absl::Status ConnectPipelined1P2PParent(const P2PGroup& p2p_group) {
return ConnectP2P1NodeChain(p2p_group.GetParent());
}
absl::Status ConnectPipelined2P2PParent(const P2PGroup& p2p_group,
const P2PGroupMap& p2p_group_map) {
return ConnectP2P2NodeChain(
p2p_group.GetComplementGroup(p2p_group_map)->GetParent(),
p2p_group.GetParent());
}
absl::Status ConnectUnpipelined2P2P(const P2PGroup& p2p_group,
const P2PGroupMap& p2p_group_map) {
CHECK(p2p_group.runtime_stream == kStream1);
return ConnectP2P2NodeChain(
p2p_group.GetComplementGroup(p2p_group_map)->GetChild(),
p2p_group.GetChild());
}
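// Walks `computation` in post order, collecting its P2P ops into groups,
// recording whether it contains collective ops, attaching while ops to
// pipelined groups, validating each group (dropping unrecognized ones), and
// pairing stream-1 groups with their stream-0 complements.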
absl::Status GatherP2PGroupsAndCollectiveInfo(
const HloComputation* computation, P2PInComputation& p2p_in_computation,
P2PGroupMap& p2p_group_map,
CollectiveInComputation& collective_in_computation) {
collective_in_computation[computation] = false;
std::vector<HloInstruction*> while_ops;
for (auto hlo : computation->MakeInstructionPostOrder()) {
if (MayInvokeCollectiveOp(hlo, collective_in_computation)) {
collective_in_computation[computation] = true;
}
if (hlo->opcode() == HloOpcode::kWhile) {
while_ops.push_back(hlo);
continue;
}
if (!IsP2POp(hlo)) {
continue;
}
HloSendRecvInstruction* p2p = Cast<HloSendRecvInstruction>(hlo);
int64_t channel = p2p->channel_id().value();
auto p2p_group = p2p_group_map.find(channel);
if (p2p_group == p2p_group_map.end()) {
P2PGroup group;
TF_RETURN_IF_ERROR(group.RecordP2POpForUnpipelinedGroup(p2p));
p2p_group_map[channel] = group;
} else {
P2PGroup& group = p2p_group->second;
if (group.ChildComputation() == computation) {
TF_RETURN_IF_ERROR(group.RecordP2POpForUnpipelinedGroup(p2p));
} else {
TF_RETURN_IF_ERROR(group.RecordP2POpForPipelinedGroup(p2p));
}
}
auto p2p_in_comp = p2p_in_computation.find(computation);
if (p2p_in_comp == p2p_in_computation.end()) {
p2p_in_computation[computation] = {channel};
} else {
p2p_in_comp->second.insert(channel);
}
}
for (auto hlo : while_ops) {
TF_RETURN_IF_ERROR(
MayAddWhileOpToPipelinedGroup(hlo, p2p_in_computation, p2p_group_map));
}
for (auto& [channel, p2p_group] : p2p_group_map) {
if (p2p_group.kind == kUnpipelined) {
if (p2p_group.nodes[kUnpipelinedNodeIdx].Incomplete() ||
!p2p_group.RecordRuntimeStream()) {
p2p_group.kind = kUnrecognized;
}
} else if (p2p_group.kind == kPipelined) {
if (p2p_group.nodes[kPipelinedChildNodeIdx].Incomplete() ||
p2p_group.nodes[kPipelinedParentNodeIdx]
.IncompletePipelinedParent() ||
!p2p_group.RecordRuntimeStream()) {
p2p_group.kind = kUnrecognized;
}
}
}
absl::erase_if(p2p_group_map, [](const auto& p2p_group) {
return p2p_group.second.kind == kUnrecognized;
});
for (auto& [channel, p2p_group] : p2p_group_map) {
if ((p2p_group.kind == kPipelined &&
p2p_group.ParentComputation() != computation) ||
p2p_group.InCycle() || p2p_group.runtime_stream != kStream1) {
continue;
}
TF_RETURN_IF_ERROR(p2p_group.RecordComplementGroup(p2p_group_map));
}
return absl::OkStatus();
}
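// Adds intra-chain control dependencies for every P2P group whose chain
// appears in `computation`, dispatching on group kind, cycle membership,
// and runtime stream.  Returns the number of chains processed and the
// pipelined group (if any) whose child chain lives in this computation.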
absl::StatusOr<std::pair<int, const P2PGroup*>> ConnectP2PChain(
HloComputation* computation, const P2PGroupMap& p2p_group_map,
const std::set<int64_t>& p2p_channels) {
const P2PGroup* pipelined_group = nullptr;
int num_p2p_chains = 0;
for (int64_t channel : p2p_channels) {
auto it = p2p_group_map.find(channel);
if (it == p2p_group_map.end()) {
continue;
}
num_p2p_chains++;
const P2PGroup& p2p_group = it->second;
P2PGroupKind kind = p2p_group.kind;
if (kind == P2PGroupKind::kUnpipelined) {
if (!p2p_group.InCycle()) {
TF_RETURN_IF_ERROR(ConnectUnpipelinedP2P(p2p_group));
} else if (p2p_group.runtime_stream == kStream1) {
TF_RETURN_IF_ERROR(ConnectUnpipelined2P2P(p2p_group, p2p_group_map));
}
continue;
}
if (!p2p_group.InCycle()) {
if (computation == p2p_group.ParentComputation()) {
TF_RETURN_IF_ERROR(ConnectPipelined1P2PParent(p2p_group));
} else {
if (pipelined_group != nullptr) {
return Internal("Expected <=1 pipelined group in a while-body");
}
pipelined_group = &p2p_group;
TF_RETURN_IF_ERROR(ConnectPipelined1P2PChild(p2p_group));
}
continue;
}
if (p2p_group.runtime_stream != kStream1) {
continue;
}
if (computation == p2p_group.ParentComputation()) {
TF_RETURN_IF_ERROR(ConnectPipelined2P2PParent(p2p_group, p2p_group_map));
} else {
if (pipelined_group != nullptr) {
return Internal(
"Expected only two pipelined groups forming a cycle in a "
"while-body");
}
pipelined_group = &p2p_group;
TF_RETURN_IF_ERROR(ConnectPipelined2P2PChild(p2p_group, p2p_group_map));
}
}
return std::make_pair(num_p2p_chains, pipelined_group);
}
absl::Status OrderBefore(HloReachabilityMap* reachability, HloInstruction* a,
HloInstruction* b) {
VLOG(10) << "OrderBefore " << a->ToString() << " " << b->ToString();
if (!reachability->IsReachable(a, b)) {
TF_RETURN_IF_ERROR(a->AddControlDependencyTo(b));
VLOG(10) << "add control predecessor " << b->ToString();
reachability->UpdateReachabilityThroughInstruction(b);
}
return absl::OkStatus();
}
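// Orders the chain of `group` against the other P2P chains and collective
// ops in the instruction sequence, using reachability to pick a direction
// that does not create a dependency cycle.  This serialization presumably
// keeps the scheduler from interleaving chains, which could deadlock at
// runtime.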
absl::Status LinearizeCollectivesWithOtherP2P(
const P2PGroupMap& p2p_group_map, const P2PGroup& group,
const CollectiveInComputation& collective_in_computation,
const std::vector<HloInstruction*>::iterator& chain_start_iter,
const std::vector<HloInstruction*>::iterator& begin_iter,
const std::vector<HloInstruction*>::iterator& end_iter,
HloReachabilityMap* reachability) {
HloComputation* computation = (*chain_start_iter)->parent();
ChainStartEnd start_end = group.GetChainStartEnd(computation, p2p_group_map);
for (auto it = begin_iter; it != end_iter; ++it) {
HloInstruction* hlo = *it;
if (IsP2POp(hlo)) {
auto group_it = p2p_group_map.find(hlo->channel_id().value());
if (group_it == p2p_group_map.end()) {
continue;
}
const P2PGroup& cur_group = group_it->second;
P2PGroupKind kind = cur_group.kind;
if (kind == P2PGroupKind::kPipelined &&
computation == cur_group.ChildComputation()) {
continue;
}
ChainStartEnd cur_start_end =
cur_group.GetChainStartEnd(computation, p2p_group_map);
if (cur_start_end.first != hlo) {
continue;
}
if (it <= chain_start_iter) {
continue;
}
if (reachability->IsReachable(start_end.first, cur_start_end.second)) {
TF_RETURN_IF_ERROR(
OrderBefore(reachability, start_end.second, cur_start_end.first));
} else {
TF_RETURN_IF_ERROR(
OrderBefore(reachability, cur_start_end.second, start_end.first));
}
continue;
}
if (!MayInvokeCollectiveOp(hlo, collective_in_computation)) {
continue;
}
if (hlo->opcode() == HloOpcode::kWhile &&
group.kind == P2PGroupKind::kPipelined && group.GetWhileOp() == hlo) {
continue;
}
    if (hlo_query::IsAsyncCollectiveDoneOp(hlo, /*include_send_recv=*/false)) {
if (reachability->IsReachable(start_end.first, hlo)) {
TF_RETURN_IF_ERROR(OrderBefore(reachability, start_end.second,
GetStartOpForDoneOp(hlo)));
} else {
TF_RETURN_IF_ERROR(OrderBefore(reachability, hlo, start_end.first));
}
}
if (reachability->IsReachable(start_end.first, hlo)) {
TF_RETURN_IF_ERROR(OrderBefore(reachability, start_end.second, hlo));
} else {
TF_RETURN_IF_ERROR(OrderBefore(reachability, hlo, start_end.first));
}
}
return absl::OkStatus();
}
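// For the while-body of a pipelined P2P group, orders every other
// collective-invoking op (including the tail of any other P2P chain) before
// the start of the pipelined chain.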
absl::Status LinearizeCollectivesWithPipelinedP2PChild(
const P2PGroupMap& p2p_group_map, const P2PGroup& group,
const CollectiveInComputation& collective_in_computation,
HloComputation* computation, HloReachabilityMap* reachability) {
ChainStartEnd start_end = group.GetChainStartEnd(computation, p2p_group_map);
for (HloInstruction* hlo : computation->MakeInstructionPostOrder()) {
if (!MayInvokeCollectiveOp(hlo, collective_in_computation)) {
continue;
}
HloOpcode opcode = hlo->opcode();
if (IsP2POp(hlo) && opcode != HloOpcode::kSendDone) {
continue;
}
    if (opcode == HloOpcode::kSendDone) {
auto group_it = p2p_group_map.find(hlo->channel_id().value());
if (group_it == p2p_group_map.end()) {
continue;
}
const P2PGroup& cur_group = group_it->second;
P2PGroupKind kind = cur_group.kind;
if (kind == P2PGroupKind::kPipelined &&
computation == cur_group.ChildComputation()) {
continue;
}
ChainStartEnd cur_start_end =
cur_group.GetChainStartEnd(computation, p2p_group_map);
TF_RETURN_IF_ERROR(
OrderBefore(reachability, cur_start_end.second, start_end.first));
continue;
}
TF_RETURN_IF_ERROR(OrderBefore(reachability, hlo, start_end.first));
}
return absl::OkStatus();
}
}
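// Pass entry point: gathers P2P-group and collective information for all
// computations, then connects each recognized P2P chain and linearizes it
// with respect to other collectives in the same computation.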
absl::StatusOr<bool> P2PSchedulePreparation::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
P2PGroupMap p2p_group_map;
P2PInComputation p2p_in_computation;
CollectiveInComputation collective_in_computation;
std::vector<HloComputation*> all_computations =
module->MakeComputationPostOrder(execution_threads);
for (auto iter = all_computations.begin(); iter != all_computations.end();
++iter) {
VLOG(10) << "Gathering P2P groups and collective info for computation "
<< (*iter)->name();
TF_RETURN_IF_ERROR(GatherP2PGroupsAndCollectiveInfo(
*iter, p2p_in_computation, p2p_group_map, collective_in_computation));
}
if (p2p_group_map.empty()) {
return false;
}
for (auto iter = all_computations.rbegin(); iter != all_computations.rend();
++iter) {
HloComputation* computation = *iter;
auto p2p_in_comp = p2p_in_computation.find(computation);
if (p2p_in_comp == p2p_in_computation.end()) {
continue;
}
std::set<int64_t>& p2p_channels = p2p_in_comp->second;
TF_ASSIGN_OR_RETURN(
auto result, ConnectP2PChain(computation, p2p_group_map, p2p_channels));
if (result.first == 0) {
continue;
}
VLOG(10) << "Processing computation " << computation->name()
<< " num_p2p_chains " << result.first;
std::unique_ptr<HloReachabilityMap> reachability =
HloReachabilityMap::Build(computation);
if (result.second != nullptr) {
TF_RETURN_IF_ERROR(LinearizeCollectivesWithPipelinedP2PChild(
p2p_group_map, *result.second, collective_in_computation, computation,
reachability.get()));
}
std::vector<HloInstruction*> all_instructions =
computation->MakeInstructionPostOrder();
std::vector<HloInstruction*>::iterator begin = all_instructions.begin();
std::vector<HloInstruction*>::iterator end = all_instructions.end();
for (auto instr_it = begin; instr_it != end; ++instr_it) {
HloInstruction* hlo = *instr_it;
if (!IsP2POp(hlo)) {
continue;
}
HloSendRecvInstruction* p2p = Cast<HloSendRecvInstruction>(hlo);
int64_t channel = p2p->channel_id().value();
auto group_it = p2p_group_map.find(channel);
if (group_it == p2p_group_map.end()) {
continue;
}
P2PGroup& group = group_it->second;
P2PGroupKind kind = group.kind;
if (kind == P2PGroupKind::kPipelined &&
computation == group.ChildComputation()) {
continue;
}
ChainStartEnd start_end =
group.GetChainStartEnd(computation, p2p_group_map);
if (start_end.first != hlo) {
continue;
}
VLOG(10) << "linearize other collectives with respect to channel "
<< hlo->ToString();
TF_RETURN_IF_ERROR(LinearizeCollectivesWithOtherP2P(
p2p_group_map, group, collective_in_computation, instr_it, begin, end,
reachability.get()));
VLOG(10) << "finish connect other collectives with channel ";
}
}
return true;
}
} | #include "xla/service/p2p_schedule_preparation.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "absl/strings/str_format.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
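// Test fixture with helpers that verify the control-dependency chains that
// P2PSchedulePreparation is expected to add (or leave absent) for each kind
// of P2P group.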
class P2PSchedulePreparationTest : public HloTestBase {
public:
void VerifyP2PNotTransformed(HloModule* module,
const std::string& suffix = "") {
HloInstruction* recv = FindInstruction(module, "recv" + suffix);
HloInstruction* recv_done = FindInstruction(module, "recv-done" + suffix);
HloInstruction* send_done = FindInstruction(module, "send-done" + suffix);
EXPECT_EQ(recv->control_predecessors().size(), 0);
EXPECT_EQ(recv_done->control_predecessors().size(), 0);
EXPECT_EQ(send_done->control_predecessors().size(), 0);
}
void VerifyP2P1GroupChain(HloModule* module, const std::string& suffix) {
HloInstruction* send = FindInstruction(module, "send" + suffix);
HloInstruction* recv = FindInstruction(module, "recv" + suffix);
HloInstruction* recv_done = FindInstruction(module, "recv-done" + suffix);
HloInstruction* send_done = FindInstruction(module, "send-done" + suffix);
EXPECT_EQ(send->control_predecessors()[0], recv);
EXPECT_EQ(recv_done->control_predecessors()[0], send);
EXPECT_EQ(send_done->control_predecessors()[0], recv_done);
}
void VerifyUnpipelinedP2P(HloModule* module, const std::string& suffix = "") {
VerifyP2P1GroupChain(module, suffix);
}
void VerifyPipelinedP2PChild(HloModule* module,
const std::string& suffix = "") {
VerifyP2P1GroupChain(module, suffix);
}
void VerifyPipelinedP2PParent(HloModule* module,
const std::string& suffix = "") {
VerifyP2P1GroupChain(module, suffix);
}
void VerifyP2P2GroupChain(HloModule* module, const std::string& suffix0,
const std::string& suffix1) {
HloInstruction* send0 = FindInstruction(module, "send" + suffix0);
HloInstruction* recv0 = FindInstruction(module, "recv" + suffix0);
HloInstruction* recv_done0 = FindInstruction(module, "recv-done" + suffix0);
HloInstruction* send_done0 = FindInstruction(module, "send-done" + suffix0);
HloInstruction* send1 = FindInstruction(module, "send" + suffix1);
HloInstruction* recv1 = FindInstruction(module, "recv" + suffix1);
HloInstruction* recv_done1 = FindInstruction(module, "recv-done" + suffix1);
HloInstruction* send_done1 = FindInstruction(module, "send-done" + suffix1);
EXPECT_EQ(recv_done1->control_predecessors()[0], recv_done0);
EXPECT_EQ(send_done0->control_predecessors()[0], recv_done1);
EXPECT_EQ(send_done1->control_predecessors()[0], send_done0);
EXPECT_EQ(send0->control_predecessors()[0], recv0);
EXPECT_EQ(recv1->control_predecessors()[0], send0);
EXPECT_EQ(send1->control_predecessors()[0], recv1);
EXPECT_EQ(recv_done0->control_predecessors()[0], send1);
}
void VerifyPipelined2P2PChild(HloModule* module, const std::string& suffix0,
const std::string& suffix1) {
VerifyP2P2GroupChain(module, suffix0, suffix1);
}
void VerifyPipelined2P2PParent(HloModule* module, const std::string& suffix0,
const std::string& suffix1) {
VerifyP2P2GroupChain(module, suffix0, suffix1);
}
};
constexpr char kEmpty[] = "";
constexpr char kHostTransfer[] = ", is_host_transfer=true";
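// Builds an HLO module with a single unnested P2P chain; `is_host` marks the
// ops as host transfers and `incomplete` omits the Send/SendDone pair.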
std::string GetUnnestedP2PModuleString(bool is_host = false,
bool incomplete = false) {
constexpr char kSend[] = R"(
send = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all),
channel_id=2, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}}"
} %s
send-done = token[] send-done(send), channel_id=2 %s
)";
constexpr char kSimpleModule[] = R"(
HloModule test
ENTRY main {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}}"
} %s
recv-done = (f32[1, 1024, 1024], token[]) recv-done(recv), channel_id=2 %s
%s
ROOT recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done), index=0
}
)";
const char* is_host_str = is_host ? kHostTransfer : kEmpty;
if (incomplete) {
return absl::StrFormat(kSimpleModule, is_host_str, is_host_str, kEmpty);
}
std::string send_str = absl::StrFormat(kSend, is_host_str, is_host_str);
return absl::StrFormat(kSimpleModule, is_host_str, is_host_str, send_str);
}
TEST_F(P2PSchedulePreparationTest, UnnestedP2PChainHostNotTransformed) {
  std::string kModuleStr = GetUnnestedP2PModuleString(/*is_host=*/true);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(P2PSchedulePreparationTest, UnnestedP2PChainIncompleteNotTransformed) {
  std::string kModuleStr =
      GetUnnestedP2PModuleString(/*is_host=*/false, /*incomplete=*/true);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(P2PSchedulePreparationTest, UnnestedP2PChainTransformed) {
std::string kModuleStr = GetUnnestedP2PModuleString();
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_TRUE(changed);
VerifyUnpipelinedP2P(module.get());
}
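// Builds an HLO module with one P2P chain inside a while-body and a second
// chain in the entry computation; either chain can be marked as a host
// transfer.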
std::string GetNestedP2PModuleString(bool while_p2p_is_host = false,
bool main_p2p_is_host = false) {
constexpr char kModuleTemplate[] = R"(
HloModule test
while-cond {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(25)
ROOT cond-result = pred[] compare(count, ub), direction=LT
}
while-body {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(param), index=0
send-data = get-tuple-element(param), index=1
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1,
frontend_attributes={
        _xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}}"
} %s
send = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}}"
} %s
recv-done = (f32[1, 1024, 1024], token[]) recv-done(recv), channel_id=1 %s
recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done), index=0
send-done = token[] send-done(send), channel_id=1 %s
c1 = u32[] constant(1)
new-count = u32[] add(count, c1)
ROOT body-result = (u32[], f32[1, 1024, 1024]) tuple(new-count, recv-data)
}
ENTRY main {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
after-all.1 = token[] after-all()
recv.1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.1), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}}"
} %s
send.1 = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all.1),
channel_id=2, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}}"
} %s
recv-done.1 = (f32[1, 1024, 1024], token[]) recv-done(recv.1), channel_id=2 %s
send-done.1 = token[] send-done(send.1), channel_id=2 %s
recv-data.1 = f32[1, 1024, 1024] get-tuple-element(recv-done.1), index=0
while-init = (u32[], f32[1, 1024, 1024]) tuple(c0, recv-data.1)
while-result = (u32[], f32[1, 1024, 1024]) while(while-init),
body=while-body, condition=while-cond
while-result-data = f32[1, 1024, 1024] get-tuple-element(while-result), index=1
ROOT entry-result = f32[1, 1024, 1024] add(while-result-data, recv-data.1)
}
)";
const char* while_p2p = while_p2p_is_host ? kHostTransfer : kEmpty;
const char* main_p2p = main_p2p_is_host ? kHostTransfer : kEmpty;
return absl::StrFormat(kModuleTemplate, while_p2p, while_p2p, while_p2p,
while_p2p, main_p2p, main_p2p, main_p2p, main_p2p);
}
TEST_F(P2PSchedulePreparationTest, WhileP2PIsHostNotMainTransformed) {
  std::string kModuleStr =
      GetNestedP2PModuleString(/*while_p2p_is_host=*/true);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_TRUE(changed);
VLOG(10) << module->ToString();
VerifyP2PNotTransformed(module.get());
VerifyUnpipelinedP2P(module.get(), ".1");
HloInstruction* send_done = FindInstruction(module.get(), "send-done.1");
HloInstruction* while_loop = FindInstruction(module.get(), "while-result");
EXPECT_EQ(while_loop->control_predecessors()[0], send_done);
}
TEST_F(P2PSchedulePreparationTest, MainP2PIsHostNotWhileTransformed) {
  std::string kModuleStr = GetNestedP2PModuleString(
      /*while_p2p_is_host=*/false, /*main_p2p_is_host=*/true);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_TRUE(changed);
VLOG(10) << module->ToString();
VerifyUnpipelinedP2P(module.get());
VerifyP2PNotTransformed(module.get(), ".1");
}
TEST_F(P2PSchedulePreparationTest, NestedP2PChainTransformed) {
std::string kModuleStr = GetNestedP2PModuleString();
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_TRUE(changed);
VLOG(10) << module->ToString();
VerifyUnpipelinedP2P(module.get());
VerifyUnpipelinedP2P(module.get(), ".1");
HloInstruction* send_done = FindInstruction(module.get(), "send-done.1");
HloInstruction* recv_user = FindInstruction(module.get(), "while-result");
EXPECT_EQ(recv_user->control_predecessors()[0], send_done);
}
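// Builds an HLO module with a pipelined P2P chain. Optional variations add a
// nested while loop in the entry computation, a second (unpipelined) P2P
// chain inside the pipelined while-body, or a custom call in the entry
// computation.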
std::string GetPipelinedP2PModuleString(bool nested_p2p_in_main = false,
bool other_p2p_in_while = false,
bool test_custom_call = false) {
constexpr char kWhileForMain[] = R"(
while-cond-2 {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(25)
ROOT cond-result-2 = pred[] compare(count, ub), direction=LT
}
while-body-2 {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(param), index=0
send-data = get-tuple-element(param), index=1
after-all.3 = token[] after-all()
recv.3 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.3), channel_id=3,
frontend_attributes={
        _xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}}"
}
send.3 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all.3),
channel_id=3, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}}"
}
recv-done.3 = (f32[1, 1024, 1024], token[]) recv-done(recv.3), channel_id=3
recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done.3), index=0
send-done.3 = token[] send-done(send.3), channel_id=3
c1 = u32[] constant(1)
new-count = u32[] add(count, c1)
ROOT body-result-2 = (u32[], f32[1, 1024, 1024]) tuple(new-count, recv-data)
}
)";
constexpr char kUnnestedResult[] = R"(
while-result-1 = f32[1, 1024, 1024] get-tuple-element(while-result), index=1
collective-permute.2 = f32[1, 1024, 1024] collective-permute(init),
source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}
ROOT entry-result = f32[1, 1024, 1024] add(while-result-1, collective-permute.2)
)";
constexpr char kUnnestedResultWithCustomCall[] = R"(
while-result-1 = f32[1, 1024, 1024] get-tuple-element(while-result), index=1
custom-call = f32[1, 1024, 1024] custom-call(init),
custom_call_target="my_custom_call"
ROOT entry-result = f32[1, 1024, 1024] add(while-result-1, custom-call)
)";
constexpr char kNestedResult[] = R"(
while-result-1 = f32[1, 1024, 1024] get-tuple-element(while-result), index=1
while-init-2 = (u32[], f32[1, 1024, 1024]) tuple(c0, init)
while-2 = (u32[], f32[1, 1024, 1024]) while(while-init-2),
body=while-body-2, condition=while-cond-2,
backend_config={"known_trip_count":{"n":"25"}}
while-result-2 = f32[1, 1024, 1024] get-tuple-element(while-2), index=1
ROOT entry-result = f32[1, 1024, 1024] add(while-result-1, while-result-2)
)";
constexpr char kPipelinedWhileBodyWithoutOtherP2P[] = R"(
while-body {
param = (u32[], (f32[1, 1024, 1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
recv-done.1.q = (f32[1, 1024, 1024], token[]) get-tuple-element(param), index=1
recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done.1.q), index=0
c1 = u32[] constant(1)
new-count = u32[] add(count, c1)
replica = u32[] replica-id()
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
p = f32[1, 1024, 1024] broadcast(conv), dimensions={}
b = f32[1, 1024, 1024] add(p, recv-data)
c = f32[1, 1024, 1024] multiply(b, b)
d = f32[1, 1024, 1024] tan(c)
s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
collective-permute.1 = f32[1, 1024, 1024] collective-permute(s),
source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}
new-data = f32[1, 1024, 1024] add(c, collective-permute.1)
after-all.1 = token[] after-all()
send.1 = (f32[1, 1024, 1024], token[]) send(new-data, after-all.1),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
send-done.1 = token[] send-done(send.1), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
recv.1 = (f32[1, 1024, 1024], token[]) recv(after-all.1), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
recv-done.1 = (f32[1, 1024, 1024], token[]) recv-done(recv.1), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
ROOT body-result = (u32[], (f32[1, 1024, 1024], token[]), token[])
tuple(new-count, recv-done.1, send-done.1)
}
)";
constexpr char kPipelinedWhileBodyWithOtherP2P[] = R"(
while-body {
param = (u32[], (f32[1, 1024, 1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
    recv-done.1.q = (f32[1, 1024, 1024], token[]) get-tuple-element(param), index=1
recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done.1.q), index=0
c1 = u32[] constant(1)
new-count = u32[] add(count, c1)
replica = u32[] replica-id()
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
p = f32[1, 1024, 1024] broadcast(conv), dimensions={}
b = f32[1, 1024, 1024] add(p, recv-data)
c = f32[1, 1024, 1024] multiply(b, b)
d = f32[1, 1024, 1024] tan(c)
s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
collective-permute.1 = f32[1, 1024, 1024] collective-permute(s),
source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}
send-data = f32[1, 1024, 1024] add(c, collective-permute.1)
after-all.4 = token[] after-all()
send.4 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all.4),
channel_id=4, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}"
}
send-done.4 = token[] send-done(send.4), channel_id=4
recv.4 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.4), channel_id=4,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}"
}
recv-done.4 = (f32[1, 1024, 1024], token[]) recv-done(recv.4), channel_id=4
new-data = f32[1, 1024, 1024] get-tuple-element(recv-done.4), index=0
after-all.1 = token[] after-all()
send.1 = (f32[1, 1024, 1024], token[]) send(new-data, after-all.1),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
send-done.1 = token[] send-done(send.1), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
recv.1 = (f32[1, 1024, 1024], token[]) recv(after-all.1), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
recv-done.1 = (f32[1, 1024, 1024], token[]) recv-done(recv.1), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
ROOT body-result = (u32[], (f32[1, 1024, 1024], token[]), token[])
tuple(new-count, recv-done.1, send-done.1)
}
)";
constexpr char kModuleTemplate[] = R"(
HloModule test
while-cond {
    param = (u32[], (f32[1, 1024, 1024], token[]), token[]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(25)
ROOT cond-result = pred[] compare(count, ub), direction=LT
}
%s
%s
ENTRY test-computation {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
after-all.2 = token[] after-all()
recv.2 = (f32[1, 1024, 1024], token[]) recv(after-all.2), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
recv-done.2 = (f32[1, 1024, 1024], token[]) recv-done(recv.2), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send.2 = (f32[1, 1024, 1024], token[]) send(init, after-all.2),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}",
_xla_send_recv_pipeline="0"
}
send-done.2 = token[] send-done(send.2), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
while-init = (u32[], (f32[1, 1024, 1024], token[]), token[])
tuple(c0, recv-done.2, send-done.2)
while-result = (u32[], (f32[1, 1024, 1024], token[]), token[])
while(while-init),
body=while-body, condition=while-cond,
backend_config={"known_trip_count":{"n":"25"}}
recv-done.2.q = (f32[1, 1024, 1024], token[]) get-tuple-element(while-result), index=1
recv-data.2.q = f32[1, 1024, 1024] get-tuple-element(recv-done.2.q), index=0
%s
}
)";
const char* while_str = nested_p2p_in_main ? kWhileForMain : kEmpty;
const char* pipelined_while_body_str =
other_p2p_in_while ? kPipelinedWhileBodyWithOtherP2P
: kPipelinedWhileBodyWithoutOtherP2P;
const char* result_str =
nested_p2p_in_main ? kNestedResult
: (test_custom_call ? kUnnestedResultWithCustomCall
: kUnnestedResult);
return absl::StrFormat(kModuleTemplate, while_str, pipelined_while_body_str,
result_str);
}
TEST_F(P2PSchedulePreparationTest, UnnestedPipelinedP2PChainTransformed) {
std::string kModuleStr = GetPipelinedP2PModuleString();
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_TRUE(changed);
VLOG(10) << module->ToString();
VerifyPipelinedP2PChild(module.get(), ".1");
VerifyPipelinedP2PParent(module.get(), ".2");
HloInstruction* recv_1 = FindInstruction(module.get(), "recv.1");
HloInstruction* collective_1 =
FindInstruction(module.get(), "collective-permute.1");
EXPECT_EQ(recv_1->control_predecessors()[0], collective_1);
HloInstruction* send_done_2 = FindInstruction(module.get(), "send-done.2");
HloInstruction* recv_2 = FindInstruction(module.get(), "recv.2");
HloInstruction* collective_2 =
FindInstruction(module.get(), "collective-permute.2");
EXPECT_TRUE((!collective_2->control_predecessors().empty() &&
collective_2->control_predecessors()[0] == send_done_2) ||
(!recv_2->control_predecessors().empty() &&
recv_2->control_predecessors()[0] == collective_2));
}
TEST_F(P2PSchedulePreparationTest, NestedPipelinedP2PChainTransformed) {
  std::string kModuleStr =
      GetPipelinedP2PModuleString(/*nested_p2p_in_main=*/true);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_TRUE(changed);
VLOG(10) << module->ToString();
VerifyPipelinedP2PChild(module.get(), ".1");
VerifyPipelinedP2PParent(module.get(), ".2");
VerifyUnpipelinedP2P(module.get(), ".3");
HloInstruction* send_done_2 = FindInstruction(module.get(), "send-done.2");
HloInstruction* recv_2 = FindInstruction(module.get(), "recv.2");
HloInstruction* while_2 = FindInstruction(module.get(), "while-2");
EXPECT_TRUE((!while_2->control_predecessors().empty() &&
while_2->control_predecessors()[0] == send_done_2) ||
(!recv_2->control_predecessors().empty() &&
recv_2->control_predecessors()[0] == while_2));
}
TEST_F(P2PSchedulePreparationTest,
UnnestedPipelinedP2PChainWithOtherP2PTransformed) {
  std::string kModuleStr = GetPipelinedP2PModuleString(
      /*nested_p2p_in_main=*/false, /*other_p2p_in_while=*/true);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_TRUE(changed);
VLOG(10) << module->ToString();
VerifyPipelinedP2PChild(module.get(), ".1");
VerifyPipelinedP2PParent(module.get(), ".2");
VerifyUnpipelinedP2P(module.get(), ".4");
HloInstruction* pipelined_recv = FindInstruction(module.get(), "recv.1");
HloInstruction* other_send_done =
FindInstruction(module.get(), "send-done.4");
EXPECT_EQ(1, absl::c_count(pipelined_recv->control_predecessors(),
other_send_done));
}
TEST_F(P2PSchedulePreparationTest,
UnnestedPipelinedP2PChainWithCustomCallTransformed) {
  std::string kModuleStr = GetPipelinedP2PModuleString(
      /*nested_p2p_in_main=*/false, /*other_p2p_in_while=*/false,
      /*test_custom_call=*/true);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* send_done_2 = FindInstruction(module.get(), "send-done.2");
HloInstruction* recv_2 = FindInstruction(module.get(), "recv.2");
HloInstruction* custom_call = FindInstruction(module.get(), "custom-call");
EXPECT_TRUE((!custom_call->control_predecessors().empty() &&
custom_call->control_predecessors()[0] == send_done_2) ||
(!recv_2->control_predecessors().empty() &&
recv_2->control_predecessors()[0] == custom_call));
}
TEST_F(P2PSchedulePreparationTest, PipelinedP2PChain2Transformed) {
const char* const kModuleStr = R"(
HloModule test
cond {
param = (u32[], (u32[2], token[]), (u32[2], token[]),
token[], token[]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(10)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], (u32[2], token[]), (u32[2], token[]),
token[], token[]) parameter(0)
count = get-tuple-element(param), index=0
recv-done.0.f = (u32[2], token[]) get-tuple-element(param), index=1
recv-data.0 = u32[2] get-tuple-element(recv-done.0.f), index=0
recv-done.1.f = (u32[2], token[]) get-tuple-element(param), index=2
recv-data.1 = u32[2] get-tuple-element(recv-done.1.f), index=0
replica = u32[] replica-id()
constant0 = u32[] constant(0)
compare0 = pred[] compare(replica, constant0), direction=EQ
compare = pred[2] broadcast(compare0), dimensions={}
recv-data = u32[2] select(compare, recv-data.0, recv-data.1)
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
r = u32[2] broadcast(c1), dimensions={}
s = u32[2] add(r, recv-data)
after-all.0.n = token[] after-all()
recv.0 = (u32[2], u32[], token[]) recv(after-all.0.n), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
send.0 = (u32[2], u32[], token[]) send(s, after-all.0.n),
channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.0 = token[] send-done(send.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
after-all.1.n = token[] after-all()
recv.1 = (u32[2], u32[], token[]) recv(after-all.1.n), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3}}",
_xla_send_recv_pipeline="1"
}
send.1 = (u32[2], u32[], token[]) send(s, after-all.1.n),
channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3}}",
_xla_send_recv_pipeline="1"
}
recv-done.1 = (u32[2], token[]) recv-done(recv.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
send-done.1 = token[] send-done(send.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
ROOT result = (u32[], (u32[2], token[]), (u32[2], token[]), token[], token[])
tuple(new_count, recv-done.0, recv-done.1, send-done.0, send-done.1)
}
ENTRY test_computation {
c0 = u32[] constant(0)
c1 = u32[] constant(1)
r = u32[] replica-id()
a = u32[] add(c1, r)
init = u32[2] broadcast(a), dimensions={}
after-all.0.p = token[] after-all()
recv.2 = (u32[2], u32[], token[]) recv(after-all.0.p), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
send.2 = (u32[2], u32[], token[]) send(init, after-all.0.p),
channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
recv-done.2 = (u32[2], token[]) recv-done(recv.2), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.2 = token[] send-done(send.2), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
after-all.1.p = token[] after-all()
recv.3 = (u32[2], u32[], token[]) recv(after-all.1.p), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3}}",
_xla_send_recv_pipeline="1"
}
send.3 = (u32[2], u32[], token[]) send(init, after-all.1.p),
channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3}}",
_xla_send_recv_pipeline="1"
}
recv-done.3 = (u32[2], token[]) recv-done(recv.3), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
send-done.3 = token[] send-done(send.3), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
while_init = (u32[], (u32[2], token[]), (u32[2], token[]),
token[], token[]) tuple(c0, recv-done.2, recv-done.3, send-done.2, send-done.3)
  while_result = (u32[], (u32[2], token[]), (u32[2], token[]),
    token[], token[]) while(while_init), body=body, condition=cond,
    backend_config={"known_trip_count":{"n":"10"}}
  recv-done.0.q = (u32[2], token[]) get-tuple-element(while_result), index=1
  recv-data.0.q = u32[2] get-tuple-element(recv-done.0.q), index=0
  recv-done.1.q = (u32[2], token[]) get-tuple-element(while_result), index=2
recv-data.1.q = u32[2] get-tuple-element(recv-done.1.q), index=0
replica = u32[] replica-id()
constant0 = u32[] constant(0)
compare0 = pred[] compare(replica, constant0), direction=EQ
compare = pred[2] broadcast(compare0), dimensions={}
recv-data = u32[2] select(compare, recv-data.0.q, recv-data.1.q)
s = u32[2] add(c1, recv-data)
ROOT result = u32[2] add(s, recv-data)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
VerifyPipelined2P2PChild(module.get(), ".0", ".1");
VerifyPipelined2P2PParent(module.get(), ".2", ".3");
}
TEST_F(P2PSchedulePreparationTest, UnpipelinedP2PChain2Transformed) {
const char* const kModuleStr = R"(
HloModule test
cond {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(11)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
send-data = u32[2] get-tuple-element(param), index=1
after-all.0.n = token[] after-all()
recv.0 = (u32[2], u32[], token[]) recv(after-all.0.n), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
send.0 = (u32[2], u32[], token[]) send(send-data, after-all.0.n),
channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.0 = token[] send-done(send.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
after-all.1 = token[] after-all()
recv.1 = (u32[2], u32[], token[]) recv(after-all.1), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3}}",
_xla_send_recv_pipeline="1"
}
send.1 = (u32[2], u32[], token[]) send(send-data, after-all.1),
channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3}}",
_xla_send_recv_pipeline="1"
}
recv-done.1 = (u32[2], token[]) recv-done(recv.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
send-done.1 = token[] send-done(send.1), channel_id=2,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
recv-data.0 = u32[2] get-tuple-element(recv-done.0), index=0
recv-data.1 = u32[2] get-tuple-element(recv-done.1), index=0
replica = u32[] replica-id()
constant0 = u32[] constant(0)
compare0 = pred[] compare(replica, constant0), direction=EQ
compare = pred[2] broadcast(compare0), dimensions={}
recv-data = u32[2] select(compare, recv-data.0, recv-data.1)
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
r = u32[2] broadcast(c1), dimensions={}
s = u32[2] add(r, recv-data)
ROOT result = (u32[], u32[2]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
c1 = u32[] constant(1)
r = u32[] replica-id()
a = u32[] add(c1, r)
init = u32[2] broadcast(a), dimensions={}
while_init = (u32[], u32[2]) tuple(c0, init)
while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond,
backend_config={"known_trip_count":{"n":"11"}}
ROOT recv-data = u32[2] get-tuple-element(while_result), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_TRUE(changed);
VerifyP2P2GroupChain(module.get(), ".0", ".1");
}
TEST_F(P2PSchedulePreparationTest, Unpipelined2SeparatedChainTransformed) {
const char* const kModuleStr = R"(
HloModule test
cond {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(11)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
send-data = u32[2] get-tuple-element(param), index=1
after-all.0.n = token[] after-all()
recv.0 = (u32[2], u32[], token[]) recv(after-all.0.n), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
send.0 = (u32[2], u32[], token[]) send(send-data, after-all.0.n),
channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_send_recv_pipeline="0"
}
recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send-done.0 = token[] send-done(send.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
after-all.1 = token[] after-all()
recv.1 = (u32[2], u32[], token[]) recv(after-all.1), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1},{1,2}}"
}
send.1 = (u32[2], u32[], token[]) send(send-data, after-all.1),
channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1},{1,2}}"
}
recv-done.1 = (u32[2], token[]) recv-done(recv.1), channel_id=2
send-done.1 = token[] send-done(send.1), channel_id=2
recv-data.0 = u32[2] get-tuple-element(recv-done.0), index=0
recv-data.1 = u32[2] get-tuple-element(recv-done.1), index=0
replica = u32[] replica-id()
constant0 = u32[] constant(0)
compare0 = pred[] compare(replica, constant0), direction=EQ
compare = pred[2] broadcast(compare0), dimensions={}
recv-data = u32[2] select(compare, recv-data.0, recv-data.1)
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
r = u32[2] broadcast(c1), dimensions={}
s = u32[2] add(r, recv-data)
ROOT result = (u32[], u32[2]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
c1 = u32[] constant(1)
r = u32[] replica-id()
a = u32[] add(c1, r)
init = u32[2] broadcast(a), dimensions={}
while_init = (u32[], u32[2]) tuple(c0, init)
while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond,
backend_config={"known_trip_count":{"n":"11"}}
ROOT recv-data = u32[2] get-tuple-element(while_result), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kModuleStr));
P2PSchedulePreparation preparation;
TF_ASSERT_OK_AND_ASSIGN(bool changed, preparation.Run(module.get()));
EXPECT_TRUE(changed);
VerifyUnpipelinedP2P(module.get(), ".0");
VerifyUnpipelinedP2P(module.get(), ".1");
HloInstruction* recv0 = FindInstruction(module.get(), "recv.0");
if (!recv0->control_predecessors().empty()) {
HloInstruction* send_done1 = FindInstruction(module.get(), "send-done.1");
EXPECT_EQ(recv0->control_predecessors()[0], send_done1);
} else {
HloInstruction* recv1 = FindInstruction(module.get(), "recv.1");
HloInstruction* send_done0 = FindInstruction(module.get(), "send-done.0");
EXPECT_TRUE(!recv1->control_predecessors().empty());
EXPECT_EQ(recv1->control_predecessors()[0], send_done0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/p2p_schedule_preparation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/p2p_schedule_preparation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4c398408-d0f2-4cfb-a406-fe8a0f809c2d | cpp | tensorflow/tensorflow | device_list | third_party/xla/xla/python/ifrt/device_list.cc | third_party/xla/xla/python/ifrt/device_list_test.cc | #include "xla/python/ifrt/device_list.h"
#include <atomic>
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/base/optimization.h"
#include "absl/hash/hash.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device.pb.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
char DeviceList::ID = 0;
char BasicDeviceList::ID = 0;
absl::StatusOr<tsl::RCReference<DeviceList>> DeviceList::FromProto(
LookupDeviceFunc lookup_device, const DeviceListProto& proto) {
BasicDeviceList::Devices devices;
devices.reserve(proto.device_ids_size());
for (int device_id : proto.device_ids()) {
TF_ASSIGN_OR_RETURN(Device * device, lookup_device(DeviceId(device_id)));
devices.push_back(device);
}
return BasicDeviceList::Create(std::move(devices));
}
DeviceListProto DeviceList::ToProto() const {
DeviceListProto proto;
proto.mutable_device_ids()->Reserve(devices().size());
for (Device* device : devices()) {
proto.mutable_device_ids()->AddAlreadyReserved(device->Id().value());
}
return proto;
}
tsl::RCReference<DeviceList> BasicDeviceList::Create(Devices devices) {
return tsl::MakeRef<BasicDeviceList>(std::move(devices));
}
BasicDeviceList::BasicDeviceList(Devices devices)
: devices_(std::move(devices)), hash_(kUnsetHash) {}
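// Returns the sub-list of addressable devices, computed once and cached. When
// every device is addressable, the cache points back at `this` rather than
// holding a new reference, which avoids a self-referential cycle.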
DeviceList* BasicDeviceList::AddressableDeviceList() const {
absl::call_once(addressable_device_list_cache_.once_flag, [this] {
Devices addressable_devices;
for (Device* device : devices_) {
if (device->IsAddressable()) {
addressable_devices.push_back(device);
}
}
const bool already_fully_addressable =
addressable_devices.size() == devices_.size();
if (already_fully_addressable) {
addressable_device_list_cache_.device_list =
const_cast<BasicDeviceList*>(this);
} else {
addressable_device_list_cache_.device_list_holder =
BasicDeviceList::Create(std::move(addressable_devices));
addressable_device_list_cache_.device_list =
addressable_device_list_cache_.device_list_holder.get();
}
});
return addressable_device_list_cache_.device_list;
}
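// Computes the hash lazily. kUnsetHash marks "not yet computed"; if the real
// hash happens to equal it, the value is nudged by one. Relaxed atomics are
// sufficient because recomputing the hash is idempotent.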
uint64_t BasicDeviceList::hash() const {
uint64_t hash = hash_.load(std::memory_order_relaxed);
if (ABSL_PREDICT_FALSE(hash == kUnsetHash)) {
hash = absl::HashOf(devices());
if (ABSL_PREDICT_FALSE(hash == kUnsetHash)) {
++hash;
}
hash_.store(hash, std::memory_order_relaxed);
}
return hash;
}
std::string BasicDeviceList::ToString() const {
return absl::StrCat("BasicDeviceList([",
absl::StrJoin(devices_, ",",
[](std::string* out, Device* device) {
absl::StrAppend(out,
device->DebugString());
}),
"])");
}
std::vector<DeviceId> GetDeviceIds(
const tsl::RCReference<DeviceList>& device_list) {
std::vector<DeviceId> ids;
ids.reserve(device_list->devices().size());
for (const Device* device : device_list->devices()) {
ids.push_back(device->Id());
}
return ids;
}
}
} | #include "xla/python/ifrt/device_list.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device.pb.h"
#include "xla/python/ifrt/device_test_util.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::ElementsAreArray;
class DeviceListTest : public test_util::DeviceTest {};
TEST_P(DeviceListTest, ToFromProto) {
auto device_list = GetDevices({0, 1});
DeviceListProto proto = device_list->ToProto();
auto lookup_device_func = [&](DeviceId device_id) -> absl::StatusOr<Device*> {
return client()->LookupDevice(device_id);
};
TF_ASSERT_OK_AND_ASSIGN(auto device_list_copy,
DeviceList::FromProto(lookup_device_func, proto));
EXPECT_EQ(*device_list_copy, *device_list);
}
TEST_P(DeviceListTest, AddressableDevices) {
auto device_list = GetDevices({0, 1});
std::vector<Device*> addressable_devices;
for (Device* device : device_list->devices()) {
if (device->IsAddressable()) {
addressable_devices.push_back(device);
}
}
EXPECT_THAT(device_list->AddressableDeviceList()->devices(),
ElementsAreArray(addressable_devices));
}
TEST_P(DeviceListTest, AddressableDevicesFromConcurrentCalls) {
auto device_list = GetDevices({0, 1});
const int num_threads = 16;
auto thread_pool = std::make_unique<tsl::thread::ThreadPool>(
tsl::Env::Default(), tsl::ThreadOptions(), "test_pool",
std::min(num_threads, tsl::port::MaxParallelism()));
std::vector<DeviceList*> addressable_device_lists(num_threads);
for (int i = 0; i < num_threads; ++i) {
thread_pool->Schedule([&, i]() {
addressable_device_lists[i] = device_list->AddressableDeviceList();
addressable_device_lists[i]->devices().front()->Id();
});
}
thread_pool.reset();
for (int i = 0; i < num_threads; ++i) {
EXPECT_EQ(*addressable_device_lists[i],
*device_list->AddressableDeviceList());
}
}
TEST_P(DeviceListTest, IdenticalHashFromConcurrentCalls) {
auto device_list = GetDevices({0, 1});
const int num_threads = 16;
auto thread_pool = std::make_unique<tsl::thread::ThreadPool>(
tsl::Env::Default(), tsl::ThreadOptions(), "test_pool",
std::min(num_threads, tsl::port::MaxParallelism()));
std::vector<uint64_t> hashes(num_threads);
for (int i = 0; i < num_threads; ++i) {
thread_pool->Schedule([&, i]() { hashes[i] = device_list->hash(); });
}
thread_pool.reset();
for (int i = 0; i < num_threads; ++i) {
EXPECT_EQ(hashes[i], device_list->hash());
}
EXPECT_NE(device_list->hash(), 0);
}
TEST_P(DeviceListTest, EqualityTest) {
auto device_list1 = GetDevices({0, 1});
auto device_list2 = GetDevices({0, 1});
EXPECT_EQ(*device_list1, *device_list2);
auto device_list3 = device_list1;
EXPECT_EQ(*device_list1, *device_list3);
auto device_list4 = std::move(device_list2);
EXPECT_EQ(*device_list1, *device_list4);
auto device_list5 = GetDevices({0});
EXPECT_NE(*device_list1, *device_list5);
auto device_list6 = GetDevices({1, 0});
EXPECT_NE(*device_list1, *device_list6);
}
INSTANTIATE_TEST_SUITE_P(
NumDevices, DeviceListTest,
    testing::Values(test_util::DeviceTestParam{2, 1},
                    test_util::DeviceTestParam{2, 2}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/device_list.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/device_list_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
629082a0-250e-48af-a434-7fe64da908c3 | cpp | tensorflow/tensorflow | tracked_device_buffer | third_party/xla/xla/pjrt/tracked_device_buffer.cc | third_party/xla/xla/pjrt/tracked_device_buffer_test.cc | #include "xla/pjrt/tracked_device_buffer.h"
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/pjrt/event_pool.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/service/executable.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/service/shaped_buffer.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/event.h"
#include "tsl/platform/logging.h"
#include "tsl/profiler/lib/connected_traceme.h"
#include "tsl/profiler/lib/context_types.h"
namespace xla {
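// Marks the event as defined on `stream`, publishes its sequence number, and
// then runs any tasks that were queued while the event was still pending.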
void BufferSequencingEvent::SetSequencingEvent(EventPool::Handle event,
se::Stream* stream) {
{
absl::MutexLock lock(&mu_);
defined_status_.emplace(absl::OkStatus());
CHECK(!event_.event());
event_ = std::move(event);
CHECK(streams_defined_on_.empty());
streams_defined_on_.push_back(stream);
sequence_number_.store(event_.sequence_number(), std::memory_order_seq_cst);
}
this->ExecuteFutureTasks();
}
bool BufferSequencingEvent::EventHasBeenRecorded() const {
return event_.event() != nullptr;
}
bool BufferSequencingEvent::IsDefinedNoLock() const {
return defined_status_.IsConcrete();
}
uint64_t BufferSequencingEvent::sequence_number() const {
uint64_t seq = sequence_number_.load(std::memory_order_seq_cst);
return seq;
}
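// Blocks until the event has been recorded, then makes `stream` wait on it
// unless the event is already known to be defined on that stream.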
void BufferSequencingEvent::WaitForEventOnStream(se::Stream* stream) {
absl::MutexLock lock(&mu_);
mu_.Await(
absl::Condition(this, &BufferSequencingEvent::EventHasBeenRecorded));
if (std::find(streams_defined_on_.begin(), streams_defined_on_.end(),
stream) != streams_defined_on_.end()) {
return;
}
stream->WaitFor(event_.event()).IgnoreError();
streams_defined_on_.push_back(stream);
}
absl::Status BufferSequencingEvent::WaitForEventOnExternalStream(
std::intptr_t stream) {
absl::MutexLock lock(&mu_);
mu_.Await(
absl::Condition(this, &BufferSequencingEvent::EventHasBeenRecorded));
return event_.event()->WaitForEventOnExternalStream(stream);
}
bool BufferSequencingEvent::IsPredeterminedErrorOrDefinedOn(
se::Stream* stream) {
absl::MutexLock lock(&mu_);
mu_.Await(absl::Condition(this, &BufferSequencingEvent::IsDefinedNoLock));
if (defined_status_.IsConcrete() && !defined_status_.get().ok()) {
return true;
}
return std::find(streams_defined_on_.begin(), streams_defined_on_.end(),
stream) != streams_defined_on_.end();
}
bool BufferSequencingEvent::IsComplete() {
absl::MutexLock lock(&mu_);
mu_.Await(
absl::Condition(this, &BufferSequencingEvent::EventHasBeenRecorded));
return event_.event()->PollForStatus() == se::Event::Status::kComplete;
}
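// Runs `task` on the thread pool if the event's definition status is already
// known; otherwise queues it under `task_name` to run once the status is set.
// The TraceMe producer/consumer pair links both paths for profiling.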
void BufferSequencingEvent::ExecuteOrAddToFutureTasks(
const std::string& task_name, std::function<void()> task) {
tsl::profiler::TraceMeProducer producer(
"BufferSequencingEvent::ExecuteOrAddToFutureTasks",
tsl::profiler::ContextType::kPjRt);
uint64_t context_id = producer.GetContextId();
auto wrapped_task = [task = std::move(task), context_id]() {
tsl::profiler::TraceMeConsumer consumer("BufferSequencingEvent::Execute",
tsl::profiler::ContextType::kPjRt,
context_id);
task();
};
{
absl::MutexLock lock(&mu_);
if (!defined_status_.IsConcrete()) {
on_ready_tasks_callback_[task_name] = std::move(wrapped_task);
return;
}
}
thread_pool_->Schedule(std::move(wrapped_task));
}
void BufferSequencingEvent::ExecuteFutureTasks() {
absl::flat_hash_map<std::string, std::function<void()>>
on_ready_tasks_callback;
{
absl::MutexLock lock(&mu_);
on_ready_tasks_callback = std::move(on_ready_tasks_callback_);
}
auto call_all_task_callbacks = [on_ready_tasks_callback =
std::move(on_ready_tasks_callback)]() {
for (auto& [task_name, task_callback] : on_ready_tasks_callback) {
task_callback();
}
};
thread_pool_->Schedule(std::move(call_all_task_callbacks));
}
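// Takes ownership of the device memory held by `shaped_buffer`, replacing
// each buffer in it with a null DeviceMemoryBase so the ScopedShapedBuffer
// will not free it again.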
std::shared_ptr<TrackedDeviceBuffer>
TrackedDeviceBuffer::FromScopedShapedBuffer(
ScopedShapedBuffer* shaped_buffer,
absl::Span<const std::shared_ptr<BufferSequencingEvent>> definition_events,
PjRtDevice* device) {
ShapeTree<se::DeviceMemoryBase>::iterator iterator =
shaped_buffer->buffers().begin();
std::vector<se::DeviceMemoryBase> buffers;
buffers.reserve(1);
ShapeUtil::ForEachSubshape(
shaped_buffer->on_device_shape(), [&](const Shape&, const ShapeIndex&) {
CHECK(iterator != shaped_buffer->buffers().end());
buffers.push_back(iterator->second);
iterator->second = se::DeviceMemoryBase();
++iterator;
});
CHECK(iterator == shaped_buffer->buffers().end());
return std::make_shared<TrackedDeviceBuffer>(
shaped_buffer->memory_allocator(), device,
absl::Span<se::DeviceMemoryBase>(buffers), definition_events,
      /*on_delete_callback=*/nullptr);
}
ShapedBuffer TrackedDeviceBuffer::AsShapedBuffer(
const Shape& on_device_shape) const {
ShapedBuffer shaped_buffer(on_device_shape,
device_->local_device_id().value(),
device_->local_hardware_id().value());
ShapeTree<se::DeviceMemoryBase>::iterator iterator =
shaped_buffer.buffers().begin();
for (const se::DeviceMemoryBase& buf : device_memory_) {
CHECK(iterator != shaped_buffer.buffers().end());
iterator->second = buf;
++iterator;
}
CHECK(iterator == shaped_buffer.buffers().end());
return shaped_buffer;
}
void TrackedDeviceBuffer::AddToInputAsImmutable(
ShapeTree<MaybeOwningDeviceMemory>::iterator* iterator,
const ShapeTree<MaybeOwningDeviceMemory>::iterator& end) const {
for (const se::DeviceMemoryBase& buf : device_memory_) {
CHECK(*iterator != end);
(*iterator)->second = MaybeOwningDeviceMemory(buf);
++(*iterator);
}
}
void TrackedDeviceBuffer::AddToInputAsDonated(
ShapeTree<MaybeOwningDeviceMemory>::iterator* iterator,
const ShapeTree<MaybeOwningDeviceMemory>::iterator& end,
ExecutionInput* execution_input,
se::DeviceMemoryAllocator* allocator) const {
for (const se::DeviceMemoryBase& buf : device_memory_) {
CHECK(*iterator != end);
(*iterator)->second = MaybeOwningDeviceMemory(se::OwningDeviceMemory(
buf, device_->local_device_id().value(), allocator));
execution_input->SetUnownedIndex((*iterator)->first);
++(*iterator);
}
}
TrackedDeviceBuffer::TrackedDeviceBuffer(
se::DeviceMemoryAllocator* allocator, PjRtDevice* device,
absl::Span<se::DeviceMemoryBase const> device_memory,
absl::Span<const std::shared_ptr<BufferSequencingEvent>> definition_events,
absl::AnyInvocable<void() &&> on_delete_callback)
: allocator_(allocator),
device_(device),
device_memory_(device_memory.begin(), device_memory.end()),
definition_events_(std::make_move_iterator(definition_events.begin()),
std::make_move_iterator(definition_events.end())),
in_use_(true),
on_delete_callback_(std::move(on_delete_callback)) {}
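// Deallocates the owned device memory (when an allocator is present) and runs
// the on-delete callback, if any.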
TrackedDeviceBuffer::~TrackedDeviceBuffer() {
if (allocator_) {
for (const se::DeviceMemoryBase& buffer : device_memory_) {
absl::Status status =
allocator_->Deallocate(device_->local_device_id().value(), buffer);
if (!status.ok()) {
LOG(ERROR) << "Buffer deallocation failed: " << status;
}
}
}
if (on_delete_callback_) {
std::move(on_delete_callback_)();
}
}
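// Records a usage of the buffer on `usage_stream`. Events that do not yet
// have a sequence number (*event == 0) are always appended; otherwise an
// existing entry for the same stream is replaced when `event` is newer.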
void TrackedDeviceBuffer::AddUsageEvent(
se::Stream* usage_stream, std::shared_ptr<BufferSequencingEvent> event,
bool reference_held) {
CHECK(in_use_);
if (*event == 0) {
usage_events_.push_back({usage_stream, event, reference_held});
return;
}
for (auto& existing : usage_events_) {
if (*existing.event == 0) continue;
if (existing.stream == usage_stream) {
if (*existing.event < *event) {
existing.event = event;
existing.reference_held = reference_held;
}
return;
}
}
usage_events_.push_back({usage_stream, event, reference_held});
}
TrackedDeviceBuffer::StreamAndEventContainer
TrackedDeviceBuffer::LockUseAndTransferUsageEvents() {
CHECK(in_use_);
in_use_ = false;
return std::move(usage_events_);
}
void GetDeviceBufferEvents(
const TrackedDeviceBuffer& buffer, bool get_usage_events,
absl::flat_hash_set<BufferSequencingEvent*>* events) {
if (get_usage_events) {
for (const auto& e : buffer.usage_events()) {
events->insert(e.event.get());
}
} else {
for (const auto& e : buffer.definition_events()) {
events->insert(e.get());
}
}
}
void WaitForBufferDefinitionEventsOnStream(const TrackedDeviceBuffer& buffer,
se::Stream* stream) {
absl::flat_hash_set<BufferSequencingEvent*> events;
  GetDeviceBufferEvents(buffer, /*get_usage_events=*/false, &events);
for (BufferSequencingEvent* event : events) {
event->WaitForEventOnStream(stream);
}
}
} | #include "xla/pjrt/tracked_device_buffer.h"
#include <memory>
#include <vector>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/client/client_library.h"
#include "xla/client/local_client.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/test.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class TestDevice : public PjRtDevice {
public:
TestDevice() = default;
PjRtLocalHardwareId local_hardware_id() const override {
return PjRtLocalHardwareId(0);
}
PjRtClient* client() const override {
LOG(FATAL) << "Unimplemented for TestDevice.";
}
bool IsAddressable() const override {
LOG(FATAL) << "Unimplemented for TestDevice.";
}
std::unique_ptr<ScopedAsyncTrackingEvent> CreateAsyncTrackingEvent(
absl::string_view description) const override {
LOG(FATAL) << "Unimplemented for TestDevice.";
}
absl::Status TransferToInfeed(const LiteralSlice& literal) override {
return Unimplemented("Unimplemented for TestDeivce.");
}
absl::Status TransferFromOutfeed(MutableBorrowingLiteral literal) override {
return Unimplemented("Unimplemented for TestDeivce.");
}
absl::Span<PjRtMemorySpace* const> memory_spaces() const override {
LOG(FATAL) << "Unimplemented for TestDevice.";
}
absl::StatusOr<PjRtMemorySpace*> default_memory_space() const override {
LOG(FATAL) << "Unimplemented for TestDevice.";
}
};
absl::StatusOr<std::shared_ptr<TrackedDeviceBuffer>> MakeArray(
const Shape& shape, LocalClient* client, PjRtDevice* device) {
std::vector<stream_executor::DeviceMemoryBase> device_buffers;
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
client->backend().transfer_manager()->HostShapeToDeviceShape(shape),
[&](const Shape& subshape, const ShapeIndex&) -> absl::Status {
TF_ASSIGN_OR_RETURN(
se::OwningDeviceMemory device_memory,
client->backend().memory_allocator()->Allocate(
                /*device_ordinal=*/0,
client->backend().transfer_manager()->GetByteSizeRequirement(
subshape)));
device_buffers.push_back(device_memory.Release());
return absl::OkStatus();
}));
return std::make_shared<TrackedDeviceBuffer>(
client->backend().memory_allocator(), device, device_buffers,
      absl::Span<const std::shared_ptr<BufferSequencingEvent>>(),
      /*on_delete_callback=*/nullptr);
}
TEST(TrackedDeviceBufferTest, AsShapedBuffer) {
LocalClient* client = ClientLibrary::LocalClientOrDie();
TestDevice device;
Shape a_shape = ShapeUtil::MakeShape(F32, {3, 101, 4});
Shape b_shape = ShapeUtil::MakeShape(S8, {77});
Shape c_shape = ShapeUtil::MakeShape(S64, {});
TF_ASSERT_OK_AND_ASSIGN(auto a_buffer, MakeArray(a_shape, client, &device));
TF_ASSERT_OK_AND_ASSIGN(auto b_buffer, MakeArray(b_shape, client, &device));
TF_ASSERT_OK_AND_ASSIGN(auto c_buffer, MakeArray(c_shape, client, &device));
ASSERT_EQ(a_buffer->device_memory().size(), 1);
ASSERT_EQ(b_buffer->device_memory().size(), 1);
ASSERT_EQ(c_buffer->device_memory().size(), 1);
std::vector<se::DeviceMemoryBase> expected_buffer_sequence = {
a_buffer->device_memory()[0], b_buffer->device_memory()[0],
c_buffer->device_memory()[0]};
ShapedBuffer shaped_a = a_buffer->AsShapedBuffer(
client->backend().transfer_manager()->HostShapeToDeviceShape(a_shape));
ShapedBuffer shaped_b = b_buffer->AsShapedBuffer(
client->backend().transfer_manager()->HostShapeToDeviceShape(b_shape));
ShapedBuffer shaped_c = c_buffer->AsShapedBuffer(
client->backend().transfer_manager()->HostShapeToDeviceShape(c_shape));
auto expected_it = expected_buffer_sequence.begin();
for (auto it = shaped_a.buffers().begin(); it != shaped_a.buffers().end();
++it) {
ASSERT_TRUE(expected_it != expected_buffer_sequence.end());
EXPECT_TRUE(expected_it->IsSameAs(it->second));
++expected_it;
}
for (auto it = shaped_b.buffers().begin(); it != shaped_b.buffers().end();
++it) {
ASSERT_TRUE(expected_it != expected_buffer_sequence.end());
EXPECT_TRUE(expected_it->IsSameAs(it->second));
++expected_it;
}
for (auto it = shaped_c.buffers().begin(); it != shaped_c.buffers().end();
++it) {
ASSERT_TRUE(expected_it != expected_buffer_sequence.end());
EXPECT_TRUE(expected_it->IsSameAs(it->second));
++expected_it;
}
EXPECT_TRUE(expected_it == expected_buffer_sequence.end());
}
TEST(TrackedDeviceBufferTest, FromScopedShapedBuffer) {
TestDevice device;
LocalClient* client = ClientLibrary::LocalClientOrDie();
Literal literal = LiteralUtil::MakeTupleOwned(
LiteralUtil::CreateFullWithDescendingLayout<float>({10, 3, 7}, 33.4f),
LiteralUtil::One(S64));
TF_ASSERT_OK_AND_ASSIGN(
ScopedShapedBuffer shaped_buffer,
client->LiteralToShapedBuffer(literal, 0));
std::shared_ptr<TrackedDeviceBuffer> device_buffer =
TrackedDeviceBuffer::FromScopedShapedBuffer(&shaped_buffer, {}, &device);
EXPECT_EQ(device_buffer->device_memory().size(),
ShapeUtil::SubshapeCount(
client->backend().transfer_manager()->HostShapeToDeviceShape(
literal.shape())));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/tracked_device_buffer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/tracked_device_buffer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
154f17c3-5ac5-44f5-807f-66c9ebfb7ed3 | cpp | tensorflow/tensorflow | while | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/while.cc | tensorflow/lite/kernels/while_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/while.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Region.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir::odml {
namespace {
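// Replaces the terminator of each block in `region` with a tfl.yield carrying
// the same operands.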
void TFLReplaceReturnOp(Region& region, PatternRewriter& rewriter) {
OpBuilder::InsertionGuard guard(rewriter);
for (auto& block : region.getBlocks()) {
Operation* terminator = block.getTerminator();
rewriter.setInsertionPoint(terminator);
rewriter.replaceOpWithNewOp<TFL::YieldOp>(terminator,
terminator->getOperands());
}
}
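// Pattern that lowers mhlo.while to tfl.while: result types and operands carry
// over, both regions are moved wholesale, and their terminators become
// tfl.yield.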
class LegalizeWhileOp : public OpConversionPattern<mhlo::WhileOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::WhileOp while_op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final {
auto is_stateless = rewriter.getBoolAttr(false);
auto new_while = rewriter.create<TFL::WhileOp>(
while_op.getLoc(), while_op->getResultTypes(), while_op->getOperands(),
is_stateless);
new_while.getCond().takeBody(while_op.getCond());
new_while.getBody().takeBody(while_op.getBody());
TFLReplaceReturnOp(new_while.getCond(), rewriter);
TFLReplaceReturnOp(new_while.getBody(), rewriter);
rewriter.replaceOp(while_op, new_while.getResults());
return success();
}
};
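// mhlo.while ops with any tuple-typed operand are reported as legal, i.e.
// intentionally left untouched by this conversion.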
bool IsWhileLegal(mhlo::WhileOp while_op) {
for (auto type : while_op->getOperandTypes()) {
if (mlir::isa<TupleType>(type)) return true;
}
return false;
}
}
void PopulateWhilePatterns(MLIRContext* ctx, RewritePatternSet& patterns,
ConversionTarget& target) {
target.addDynamicallyLegalOp<mhlo::WhileOp>(IsWhileLegal);
patterns.add<LegalizeWhileOp>(ctx);
}
} | #include <stdint.h>
#include <memory>
#include <vector>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
#include "tensorflow/lite/profiling/memory_info.h"
namespace tflite {
using subgraph_test_util::CheckIntTensor;
using subgraph_test_util::CheckScalarStringTensor;
using subgraph_test_util::CheckStringTensor;
using subgraph_test_util::ControlFlowOpTest;
using subgraph_test_util::FillIntTensor;
using subgraph_test_util::FillScalarStringTensor;
namespace {
class WhileTest : public ControlFlowOpTest {};
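// Runs a float while loop under the XNNPack delegate; the primary and
// condition subgraphs are marked delegation-skippable so the delegate only
// claims the body subgraph.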
TEST_F(WhileTest, TestWithXNNPACK) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildFloatLessCondSubgraph(interpreter_->subgraph(1), 100);
builder_->BuildXNNPACKSubgraph(interpreter_->subgraph(2));
builder_->BuildFloatWhileSubgraph(&interpreter_->primary_subgraph(), 2);
const auto opt = TfLiteXNNPackDelegateOptionsDefault();
TfLiteDelegate* xnnpack_delegate = TfLiteXNNPackDelegateCreate(&opt);
interpreter_->primary_subgraph().MarkAsDelegationSkippable();
interpreter_->subgraph(1)->MarkAsDelegationSkippable();
ASSERT_EQ(interpreter_->ModifyGraphWithDelegate(xnnpack_delegate), kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
float* input0 =
GetTensorData<float>(interpreter_->tensor(interpreter_->inputs()[0]));
input0[0] = 1;
float* input1 =
GetTensorData<float>(interpreter_->tensor(interpreter_->inputs()[1]));
input1[0] = 1;
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
float* output0_data = GetTensorData<float>(output0);
ASSERT_EQ(output0_data[0], 256);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
float* output1_data = GetTensorData<float>(output1);
ASSERT_EQ(output1_data[0], 256);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteXNNPackDelegateDelete(xnnpack_delegate);
}
TEST_F(WhileTest, TestInputIsOutput) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 3, 3);
builder_->BuildInputIsOutputSubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputWhileSubgraph(&interpreter_->primary_subgraph(), 3);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {4});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {1}, {4});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestInputIsOutputButDifferent) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 3, 2);
builder_->BuildInputIsDifferentOutputSubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputWhileSubgraph(&interpreter_->primary_subgraph(), 2);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {5});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {1}, {8});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestFlexOutput) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 3, 2);
builder_->BuildFlexOutputSubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputWhileSubgraph(&interpreter_->primary_subgraph(), 2);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {2}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {2, 3});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {4});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {2}, {5, 6});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestCounterOnly) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 3, 1);
builder_->BuildCounterOnlySubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputWhileSubgraph(&interpreter_->primary_subgraph(), 1);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {4});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestAllCases) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 3, 5);
builder_->BuildAllInplaceScenariosSubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputWhileSubgraph(&interpreter_->primary_subgraph(), 5);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[4], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[4]), {1});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {4});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {1}, {5});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[2]);
CheckIntTensor(output2, {6}, {2, 2, 2, 2, 2, 2});
TfLiteTensor* output3 = interpreter_->tensor(interpreter_->outputs()[3]);
CheckIntTensor(output3, {6}, {4, 4, 4, 4, 4, 4});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestStaticUnconsumedOutputs) {
for (bool dynamic_tensors : {true, false}) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 3, 3);
builder_->BuildInputIsOutputSubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputWhileSubgraphWithUnconsumedOutput(
&interpreter_->primary_subgraph(), 3);
InterpreterOptions options;
if (dynamic_tensors) {
options.OptimizeMemoryForLargeTensors(1);
interpreter_->ApplyOptions(&options);
}
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {4});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {1}, {8});
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {2}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {2, 2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
CheckIntTensor(output1, {2}, {8, 8});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
}
TEST_F(WhileTest, TestDynamicOpTriggersAllocationOfUnusedInput) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 2, 3);
builder_->BuildDynamicOpTriggersAllocationOfUnsedInputSubgraph(
interpreter_->subgraph(2));
builder_->BuildMultiInputWhileSubgraph(&interpreter_->primary_subgraph(), 3);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {3});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {2}, {4, 4});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[2]);
CheckIntTensor(output2, {2}, {2, 2});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestStaticInPlace) {
const std::vector<int> expected = {6, 10, 15, 21, 28};
for (int i = 0; i < expected.size(); ++i) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1), i + 1);
builder_->BuildDeepBodySubgraph(interpreter_->subgraph(2));
builder_->BuildWhileSubgraph(&interpreter_->primary_subgraph());
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {0});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {i + 2});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output2, {1}, {expected[i]});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
}
TEST_F(WhileTest, TestStaticInPlaceLarge) {
int size = 10000;
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1), 60000);
builder_->BuildLargeBodySubgraph(interpreter_->subgraph(2));
builder_->BuildWhileSubgraph(&interpreter_->primary_subgraph());
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {size}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]),
std::vector<int>(size, 1));
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {}, {10010 * size});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output2, {size}, std::vector<int>(size, 70014));
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestTriangularNumberSequence) {
const std::vector<int> expected = {1, 3, 6, 10, 15, 21, 28};
for (int i = 0; i < expected.size(); ++i) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1), i);
builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(2));
builder_->BuildWhileSubgraph(&interpreter_->primary_subgraph());
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
auto body_subgraph = interpreter_->subgraph(2);
TfLiteTensor* subgraph_input2 =
body_subgraph->tensor(body_subgraph->inputs()[1]);
EXPECT_EQ(subgraph_input2->allocation_type, kTfLiteCustom);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {i + 1});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output2, {1}, {expected[i]});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
}
TEST_F(WhileTest, TestTriangularNumberSequenceWithShallowCopy) {
const std::vector<int> expected = {1, 3, 6, 10, 15, 21, 28};
for (int i = 0; i < expected.size(); ++i) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1), i);
builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(2));
builder_->BuildWhileSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1000000});
InterpreterOptions options;
options.OptimizeMemoryForLargeTensors(1000000);
ASSERT_EQ(interpreter_->ApplyOptions(&options), kTfLiteOk);
const size_t initial_mem_usage =
profiling::memory::GetMemoryUsage().mem_footprint_kb;
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
ASSERT_LE(profiling::memory::GetMemoryUsage().mem_footprint_kb -
initial_mem_usage,
9000);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
const std::vector<int> input_vector(1000000, 1);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]),
input_vector);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
auto body_subgraph = interpreter_->subgraph(2);
TfLiteTensor* subgraph_input2 =
body_subgraph->tensor(body_subgraph->inputs()[1]);
ASSERT_EQ(subgraph_input2->allocation_type, kTfLiteCustom);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {i + 1});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
const std::vector<int> expected2(1000000, expected[i]);
CheckIntTensor(output2, {1000000}, expected2);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
}
TEST_F(WhileTest, TestPadLoop) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1), 4);
builder_->BuildPadLoopBodySubgraph(interpreter_->subgraph(2), {1, 2});
builder_->BuildWhileSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {5, 7});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {5});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output2, {14}, {0, 0, 0, 0, 5, 7, 0, 0, 0, 0, 0, 0, 0, 0});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestDynamicBodyWithSharingEarlyExit) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 0, 4);
builder_->BuildDynamicIncreasingSizeSubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputWhileSubgraph(&interpreter_->primary_subgraph(), 4);
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {3});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {10000});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1, 2, 3});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {1});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {3}, {1, 2, 3});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestDynamicBodyWithSharing) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 3, 4);
builder_->BuildDynamicIncreasingSizeSubgraph(interpreter_->subgraph(2));
builder_->BuildMultiInputWhileSubgraph(&interpreter_->primary_subgraph(), 4);
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {3});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1000000});
interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1000000});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1, 2, 3});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {4});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {18},
{4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6, 4, 5, 6});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[2]);
EXPECT_EQ(output2->dims->data[0], 1000000);
TfLiteTensor* output3 = interpreter_->tensor(interpreter_->outputs()[3]);
EXPECT_EQ(output3->dims->data[0], 1000000);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestDynamicBodyWithSharingAndAliases) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 0, 5);
builder_->BuildDynamicBodySubgraphWithAliases(interpreter_->subgraph(2));
builder_->BuildMultiInputWhileSubgraph(&interpreter_->primary_subgraph(), 5);
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[4], {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {0});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {3});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[4]), {4});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {1});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {1}, {11});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[2]);
CheckIntTensor(output2, {1}, {12});
TfLiteTensor* output3 = interpreter_->tensor(interpreter_->outputs()[3]);
CheckIntTensor(output3, {1}, {13});
TfLiteTensor* output4 = interpreter_->tensor(interpreter_->outputs()[4]);
CheckIntTensor(output4, {1}, {13});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestOutputNotConsumed) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 11, 3);
builder_->BuildOutputNotConsumedSubgraph(*interpreter_->subgraph(2));
builder_->BuildOutputNotConsumedWhileSubgraph(
&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {3});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {3}, {18, 18, 18});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestPadLoopWithSharing) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLargeLessEqualCondSubgraph(interpreter_->subgraph(1), 3, 3);
builder_->BuildLargePadSubgraph(interpreter_->subgraph(2), {1, 2});
builder_->BuildMultiInputWhileSubgraph(&interpreter_->primary_subgraph(), 3);
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {2});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {3, 4});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output0, {1}, {5});
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
CheckIntTensor(output1, {5}, {4, 9, 10, 4, 4});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[2]);
CheckIntTensor(output2, {8}, {0, 4, 9, 10, 4, 4, 0, 0});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestPadLoopWithShallowCopy) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1), 3);
builder_->BuildPadLoopBodySubgraph(interpreter_->subgraph(2), {1, 2});
builder_->BuildWhileSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1000000});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
std::vector<int> input_vector(1000000, 0);
input_vector[0] = 5;
input_vector[1] = 7;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), input_vector);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output1, {1}, {4});
TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
std::vector<int> output_vector(1000009, 0);
output_vector[3] = 5;
output_vector[4] = 7;
CheckIntTensor(output2, {1000009}, output_vector);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
TEST_F(WhileTest, TestWhileLoopWithDynamicTensor) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(2);
builder_->BuildLessEqualCondSubgraphWithDynamicTensor(
interpreter_->subgraph(1), 3);
builder_->BuildBodySubgraphWithDynamicTensor(interpreter_->subgraph(2));
builder_->BuildWhileSubgraphWithDynamicTensor(
&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillScalarStringTensor(interpreter_->tensor(interpreter_->inputs()[0]), "A");
FillScalarStringTensor(interpreter_->tensor(interpreter_->inputs()[1]), "A");
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* string_output1 =
interpreter_->tensor(interpreter_->outputs()[0]);
CheckScalarStringTensor(string_output1, "A");
TfLiteTensor* string_output2 =
interpreter_->tensor(interpreter_->outputs()[1]);
CheckStringTensor(string_output2, {4}, {"A", "A", "A", "A"});
TfLiteTensor* integer_output =
interpreter_->tensor(interpreter_->outputs()[2]);
CheckIntTensor(integer_output, {1}, {4});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/while.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/while_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1b34593d-0aa1-4d3f-8b99-df1a2e09e923 | cpp | google/arolla | overloaded_expr_operator | arolla/expr/overloaded_expr_operator.cc | arolla/expr/overloaded_expr_operator_test.cc | #include "arolla/expr/overloaded_expr_operator.h"
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/qtype_utils.h"
#include "arolla/qtype/qtype.h"
#include "arolla/util/fingerprint.h"
namespace arolla::expr {
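// The operator fingerprint is derived from the overload name plus the
// fingerprints of every base operator, in order.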
OverloadedOperator::OverloadedOperator(absl::string_view name,
std::vector<ExprOperatorPtr> base_ops)
: ExprOperator(
name,
[name, &base_ops] {
FingerprintHasher hasher("arolla::expr::OverloadedOperator");
hasher.Combine(name, base_ops.size());
for (const auto& base_op : base_ops) {
hasher.Combine(base_op->fingerprint());
}
return std::move(hasher).Finish();
}()),
base_ops_(std::move(base_ops)) {}
absl::StatusOr<ExprOperatorSignature> OverloadedOperator::GetSignature() const {
if (base_ops_.empty()) {
return absl::InvalidArgumentError("no base operators");
}
return base_ops_.front()->GetSignature();
}
absl::StatusOr<std::string> OverloadedOperator::GetDoc() const {
if (base_ops_.empty()) {
return absl::InvalidArgumentError("no base operators");
}
return base_ops_.front()->GetDoc();
}
absl::Span<const ExprOperatorPtr> OverloadedOperator::base_ops() const {
return base_ops_;
}
absl::StatusOr<ExprOperatorPtr> OverloadedOperator::LookupOp(
absl::Span<const ExprAttributes> inputs) const {
auto lookup_result = LookupImpl(inputs);
if (!lookup_result.ok()) {
return std::move(lookup_result).status();
}
return std::get<ExprOperatorPtr>(*lookup_result);
}
absl::StatusOr<ExprAttributes> OverloadedOperator::InferAttributes(
absl::Span<const ExprAttributes> inputs) const {
auto lookup_result = LookupImpl(inputs);
if (!lookup_result.ok()) {
return std::move(lookup_result).status();
}
return std::get<ExprAttributes>(*lookup_result);
}
absl::StatusOr<ExprNodePtr> OverloadedOperator::ToLowerLevel(
const ExprNodePtr& node) const {
auto lookup_result = LookupImpl(GetExprAttrs(node->node_deps()));
if (!lookup_result.ok()) {
return std::move(lookup_result).status();
}
auto& op = std::get<ExprOperatorPtr>(*lookup_result);
auto& attr = std::get<ExprAttributes>(*lookup_result);
if (op == nullptr) {
return node;
}
return ExprNode::UnsafeMakeOperatorNode(
std::move(op), std::vector(node->node_deps()), std::move(attr));
}
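// Tries each base operator in registration order and returns the first whose
// attribute inference does not fail with InvalidArgument; any other error code
// aborts the lookup. Untyped inputs (no inferred qtype yet) short-circuit with
// a null operator, meaning "cannot decide yet".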
absl::StatusOr<std::tuple<ExprOperatorPtr, ExprAttributes>>
OverloadedOperator::LookupImpl(absl::Span<const ExprAttributes> inputs) const {
for (const auto& base_op : base_ops_) {
auto status_or = base_op->InferAttributes(inputs);
if (absl::IsInvalidArgument(status_or.status())) {
continue;
}
if (!status_or.ok()) {
return status_or.status();
}
if (!status_or->qtype()) {
return std::make_tuple(ExprOperatorPtr{}, ExprAttributes{});
}
return std::make_tuple(base_op, *std::move(status_or));
}
if (inputs.size() == 1) {
return absl::InvalidArgumentError(
absl::StrFormat("unsupported argument type %s",
inputs[0].qtype() ? inputs[0].qtype()->name() : "*"));
}
return absl::InvalidArgumentError(
absl::StrFormat("unsupported argument types (%s)",
absl::StrReplaceAll(JoinTypeNames(GetAttrQTypes(inputs)),
{{"NULL", "*"}})));
}
absl::string_view OverloadedOperator::py_qvalue_specialization_key() const {
return "::arolla::expr::OverloadedOperator";
}
} | #include "arolla/expr/overloaded_expr_operator.h"
#include <cstdint>
#include <memory>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/expr/annotation_expr_operators.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/lambda_expr_operator.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/expr/testing/test_operators.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/bytes.h"
namespace arolla::expr {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::expr::testing::DummyOp;
using ::arolla::testing::EqualsAttr;
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::InvokeExprOperator;
using ::testing::HasSubstr;
using Attr = ExprAttributes;
TEST(OverloadedOperatorTest, SmokeTest) {
ASSERT_OK_AND_ASSIGN(
auto double_op,
MakeOverloadedOperator(
"Double",
MakeLambdaOperator(
CallOp("math.add", {Placeholder("x"), Placeholder("x")})),
MakeLambdaOperator(
CallOp("strings.join", {Placeholder("x"), Placeholder("x")}))));
EXPECT_THAT(InvokeExprOperator<int>(double_op, 1), IsOkAndHolds(2));
EXPECT_THAT(InvokeExprOperator<double>(double_op, 1.5), IsOkAndHolds(3.));
EXPECT_THAT(InvokeExprOperator<Bytes>(double_op, Bytes("abc")),
IsOkAndHolds(Bytes("abcabc")));
EXPECT_THAT(double_op->InferAttributes({Attr(GetQType<bool>())}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("unsupported argument type BOOLEAN")));
EXPECT_THAT(double_op->InferAttributes(
{Attr(GetQType<int32_t>()), Attr(GetQType<int64_t>())}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("unsupported argument types (INT32,INT64)")));
}
TEST(OverloadedOperatorTest, UsingLiteralValues) {
ASSERT_OK_AND_ASSIGN(auto lambda_signature,
ExprOperatorSignature::Make("x, y"));
ASSERT_OK_AND_ASSIGN(
auto with_qtype_op,
MakeOverloadedOperator(
"WithQType",
MakeLambdaOperator(lambda_signature,
CallOp(QTypeAnnotation::Make(),
{Placeholder("x"), Placeholder("y")})),
MakeLambdaOperator(
lambda_signature,
CallOp("strings.join", {Placeholder("x"), Placeholder("y")}))));
EXPECT_THAT(with_qtype_op->InferAttributes(
{Attr{}, Attr(TypedValue::FromValue(GetQType<int32_t>()))}),
IsOkAndHolds(EqualsAttr(GetQType<int>())));
EXPECT_THAT(with_qtype_op->InferAttributes(
{Attr(GetQType<Bytes>()), Attr(GetQType<Bytes>())}),
IsOkAndHolds(EqualsAttr(GetQType<Bytes>())));
EXPECT_THAT(with_qtype_op->InferAttributes({Attr{}, Attr{}}),
IsOkAndHolds(EqualsAttr(nullptr)));
EXPECT_THAT(
with_qtype_op->InferAttributes({Attr(GetQType<Bytes>()), Attr{}, Attr{}}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("unsupported argument types (BYTES,*,*)")));
}
TEST(OverloadedOperatorTest, GetDoc) {
auto op_1 = std::make_shared<testing::DummyOp>(
"dummy_op_1", ExprOperatorSignature::MakeVariadicArgs(),
"dummy_docstring_1");
auto op_2 = std::make_shared<testing::DummyOp>(
"dummy_op_2", ExprOperatorSignature::MakeVariadicArgs(),
"dummy_docstring_2");
OverloadedOperator op("overloaded_op", {op_1, op_2});
ASSERT_THAT(op.GetDoc(), IsOkAndHolds("dummy_docstring_1"));
}
TEST(OverloadedOperatorTest, Empty) {
OverloadedOperator op("empty", {});
ASSERT_THAT(op.GetSignature(), StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("no base operators")));
ASSERT_THAT(op.GetDoc(), StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("no base operators")));
}
TEST(OverloadedOperatorTest, ResolutionOrder) {
ASSERT_OK_AND_ASSIGN(
auto op,
MakeOverloadedOperator(
"dispatch", LookupOperator("core.identity"),
MakeLambdaOperator(ExprOperatorSignature::Make("_"), Literal(1))));
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Placeholder("x")}));
EXPECT_EQ(expr->qtype(), nullptr);
}
TEST(OverloadedOperatorTest, Lowering) {
ASSERT_OK_AND_ASSIGN(auto double_add_op,
MakeLambdaOperator(CallOp(
"math.add", {Placeholder("x"), Placeholder("x")})));
ASSERT_OK_AND_ASSIGN(
auto double_op,
MakeOverloadedOperator(
"Double", double_add_op,
MakeLambdaOperator(
CallOp("strings.join", {Placeholder("x"), Placeholder("x")}))));
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(double_op, {Literal(1.0)}));
EXPECT_THAT(ToLowerNode(expr),
IsOkAndHolds(EqualsExpr(CallOp(double_add_op, {Literal(1.0)}))));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/overloaded_expr_operator.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/overloaded_expr_operator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
111cb924-fbfa-4fec-8fee-aef598b480b7 | cpp | tensorflow/tensorflow | verifier_internal | tensorflow/lite/core/tools/verifier_internal.cc | tensorflow/lite/core/tools/verifier_internal_test.cc | #include "tensorflow/lite/core/tools/verifier_internal.h"
#include <stddef.h>
#include <stdint.h>
#include "flatbuffers/verifier.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace internal {
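// Returns the deserialized Model if `buf` passes flatbuffer verification, or
// nullptr otherwise.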
const Model* VerifyFlatBufferAndGetModel(const void* buf, size_t len) {
::flatbuffers::Verifier verifier(static_cast<const uint8_t*>(buf), len);
if (VerifyModelBuffer(verifier)) {
return ::tflite::GetModel(buf);
} else {
return nullptr;
}
}
}
} | #include "tensorflow/lite/core/tools/verifier_internal.h"
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/vector.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
namespace tflite {
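// Test helper that assembles TFLite flatbuffer models in memory, so the
// verifier can be exercised on both well-formed and malformed buffers.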
class TfLiteFlatbufferModelBuilder {
public:
TfLiteFlatbufferModelBuilder() {
buffers_.push_back(
CreateBuffer(builder_, builder_.CreateVector(std::vector<uint8_t>{})));
}
TfLiteFlatbufferModelBuilder(const std::vector<BuiltinOperator>& builtin_ops,
const std::vector<std::string>& custom_ops) {
buffers_.push_back(
CreateBuffer(builder_, builder_.CreateVector(std::vector<uint8_t>{})));
for (const auto& iter : builtin_ops) {
resolver_.AddBuiltin(iter, &fake_op_);
}
for (const auto& iter : custom_ops) {
resolver_.AddCustom(iter.data(), &fake_op_);
}
}
void AddTensor(const std::vector<int>& shape, tflite::TensorType type,
const std::vector<uint8_t>& buffer, const char* name,
const bool is_variable = false) {
int buffer_index = 0;
if (!buffer.empty()) {
buffer_index = buffers_.size();
buffers_.push_back(CreateBuffer(builder_, builder_.CreateVector(buffer)));
}
if (shape.empty()) {
tensors_.push_back(CreateTensorDirect(builder_, nullptr, type,
buffer_index, name,
0, is_variable));
return;
}
tensors_.push_back(CreateTensorDirect(builder_, &shape, type, buffer_index,
name, 0,
is_variable));
}
void AddOperator(const std::vector<int32_t>& inputs,
const std::vector<int32_t>& outputs,
tflite::BuiltinOperator builtin_op, const char* custom_op) {
operator_codes_.push_back(
CreateOperatorCodeDirect(builder_, builtin_op, custom_op));
operators_.push_back(CreateOperator(
builder_, operator_codes_.size() - 1, builder_.CreateVector(inputs),
builder_.CreateVector(outputs), BuiltinOptions_NONE,
0,
0, tflite::CustomOptionsFormat_FLEXBUFFERS));
}
enum BuilderMode {
kBuilderModeEmptyVectorIsEmpty,
kBuilderModeEmptyVectorIsNull,
kBuilderModeDefault = kBuilderModeEmptyVectorIsEmpty,
};
void FinishModel(const std::vector<int32_t>& inputs,
const std::vector<int32_t>& outputs,
BuilderMode mode = kBuilderModeDefault) {
auto subgraph = std::vector<flatbuffers::Offset<SubGraph>>({CreateSubGraph(
builder_, CreateVector(tensors_, mode), CreateVector(inputs, mode),
CreateVector(outputs, mode), CreateVector(operators_, mode),
builder_.CreateString("test_subgraph"))});
auto result = CreateModel(
builder_, TFLITE_SCHEMA_VERSION, CreateVector(operator_codes_, mode),
CreateVector(subgraph, mode), builder_.CreateString("test_model"),
CreateVector(buffers_, mode));
tflite::FinishModelBuffer(builder_, result);
}
bool Verify(const void* buf, size_t length) {
return tflite::internal::VerifyFlatBufferAndGetModel(buf, length);
}
bool Verify() {
return Verify(builder_.GetBufferPointer(), builder_.GetSize());
}
private:
template <typename T>
flatbuffers::Offset<flatbuffers::Vector<T>> CreateVector(
const std::vector<T>& v, BuilderMode mode) {
if (mode == kBuilderModeEmptyVectorIsNull && v.empty()) {
return 0;
}
return builder_.CreateVector(v);
}
flatbuffers::FlatBufferBuilder builder_;
MutableOpResolver resolver_;
TfLiteRegistration fake_op_{};
std::vector<flatbuffers::Offset<Operator>> operators_;
std::vector<flatbuffers::Offset<OperatorCode>> operator_codes_;
std::vector<flatbuffers::Offset<Tensor>> tensors_;
std::vector<flatbuffers::Offset<Buffer>> buffers_;
};
TEST(VerifyModel, TestEmptyModel) {
flatbuffers::FlatBufferBuilder builder;
auto model = CreateModel(builder, TFLITE_SCHEMA_VERSION,
0, 0,
0, 0);
::tflite::FinishModelBuffer(builder, model);
ASSERT_TRUE(::tflite::internal::VerifyFlatBufferAndGetModel(
builder.GetBufferPointer(), builder.GetSize()));
}
TEST(VerifyModel, TestSimpleModel) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
builder.FinishModel({0, 1}, {2});
ASSERT_TRUE(builder.Verify());
}
TEST(VerifyModel, TestCorruptedData) {
std::string model = "123";
ASSERT_FALSE(::tflite::internal::VerifyFlatBufferAndGetModel(model.data(),
model.size()));
}
TEST(VerifyModel, TestRandomModificationIsNotAllowed) {
flatbuffers::FlatBufferBuilder builder;
auto model = CreateModel(builder, TFLITE_SCHEMA_VERSION,
0,
0, 0, 0);
::tflite::FinishModelBuffer(builder, model);
std::string model_content(reinterpret_cast<char*>(builder.GetBufferPointer()),
builder.GetSize());
for (size_t i = 0; i < model_content.size(); i++) {
model_content[i] = (model_content[i] + 137) % 255;
EXPECT_FALSE(tflite::internal::VerifyFlatBufferAndGetModel(
model_content.data(), model_content.size()))
<< "Fail at position: " << i;
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/tools/verifier_internal.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/tools/verifier_internal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8929e73e-7c5f-4fe5-94e1-a64bdd7e2baf | cpp | tensorflow/tensorflow | dtensor_location | tensorflow/dtensor/mlir/dtensor_location.cc | tensorflow/dtensor/mlir/dtensor_location_test.cc | #include "tensorflow/dtensor/mlir/dtensor_location.h"
#include <algorithm>
#include <queue>
#include <string>
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/utils/name_utils.h"
namespace tensorflow {
namespace dtensor {
namespace {
std::string CreateLocalLocationString(mlir::FileLineColLoc loc) {
return llvm::formatv(">> {0}:{1}:{2}", loc.getFilename(), loc.getLine(),
loc.getColumn())
.str();
}
}
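// Appends a synthetic call frame (file:line inside the DTensor compiler) to
// `loc`, keeping only the basename of `file` and propagating, and optionally
// extending, any op name attached to the original location.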
mlir::Location DTensorLocation(mlir::Location loc, llvm::StringRef file,
unsigned int line, llvm::StringRef name) {
auto split = file.rsplit("/");
if (!split.second.empty()) file = split.second;
mlir::Location callee_loc =
mlir::FileLineColLoc::get(loc.getContext(), file, line, 0);
std::string new_name = GetNameFromLoc(loc);
if (!new_name.empty()) {
if (!name.empty()) {
new_name = llvm::formatv("{0}/{1}", new_name, name).str();
}
callee_loc = mlir::NameLoc::get(
mlir::StringAttr::get(loc.getContext(), new_name), callee_loc);
}
return mlir::CallSiteLoc::get(callee_loc, loc);
}
mlir::Location DTensorLocation(mlir::Operation* op, llvm::StringRef file,
unsigned int line, llvm::StringRef name) {
return DTensorLocation(op->getLoc(), file, line, name);
}
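// Walks the location tree breadth-first and prints every file:line frame as a
// ">> file:line:col" entry per line, original user location first.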
std::string DTensorLocationToString(mlir::Location loc) {
llvm::SmallVector<std::string, 4> stack;
std::queue<mlir::Location> queue;
queue.push(loc);
while (!queue.empty()) {
mlir::Location& front = queue.front();
if (auto name_loc = mlir::dyn_cast<mlir::NameLoc>(front)) {
queue.push(name_loc.getChildLoc());
} else if (auto callsite_loc = mlir::dyn_cast<mlir::CallSiteLoc>(front)) {
queue.push(callsite_loc.getCallee());
queue.push(callsite_loc.getCaller());
} else if (auto line_loc = mlir::dyn_cast<mlir::FileLineColLoc>(front)) {
stack.push_back(CreateLocalLocationString(line_loc));
}
queue.pop();
}
std::reverse(stack.begin(), stack.end());
std::string s;
llvm::raw_string_ostream ss(s);
llvm::interleave(stack, ss, "\n");
return ss.str();
}
}
} | #include "tensorflow/dtensor/mlir/dtensor_location.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/utils/name_utils.h"
#include "tensorflow/core/platform/test.h"
namespace {
void CheckFileLineColLocation(mlir::Location loc, unsigned line,
unsigned column) {
ASSERT_TRUE(mlir::isa<mlir::FileLineColLoc>(loc));
auto file_line_col_loc = mlir::cast<mlir::FileLineColLoc>(loc);
EXPECT_EQ(file_line_col_loc.getFilename(), "test.cc");
EXPECT_EQ(file_line_col_loc.getLine(), line);
EXPECT_EQ(file_line_col_loc.getColumn(), column);
}
TEST(DTensorLocationTest, HandlesEmptyLocation) {
mlir::MLIRContext ctx;
mlir::Location loc = mlir::FileLineColLoc::get(&ctx, "test.cc", 10, 20);
loc = tensorflow::dtensor::DTensorLocation(loc, "test.cc", 21);
ASSERT_TRUE(mlir::isa<mlir::CallSiteLoc>(loc));
auto callsite_loc = mlir::cast<mlir::CallSiteLoc>(loc);
CheckFileLineColLocation(callsite_loc.getCallee(), 21, 0);
CheckFileLineColLocation(callsite_loc.getCaller(), 10, 20);
constexpr char stack[] = R"stack(>> test.cc:10:20
>> test.cc:21:0)stack";
EXPECT_EQ(tensorflow::dtensor::DTensorLocationToString(loc), stack);
}
TEST(DTensorLocationTest, HandlesMultipleCalls) {
mlir::MLIRContext ctx;
mlir::Location test_loc = mlir::FileLineColLoc::get(&ctx, "test.cc", 10, 20);
test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 21);
test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 22);
test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 23);
test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 24);
auto verify_loc = test_loc;
for (int i = 0; i < 4; ++i) {
ASSERT_TRUE(mlir::isa<mlir::CallSiteLoc>(verify_loc));
auto callsite_loc = mlir::cast<mlir::CallSiteLoc>(verify_loc);
auto callee_loc = callsite_loc.getCallee();
CheckFileLineColLocation(callee_loc, 24 - i, 0);
verify_loc = callsite_loc.getCaller();
}
CheckFileLineColLocation(verify_loc, 10, 20);
constexpr char stack[] = R"stack(>> test.cc:10:20
>> test.cc:21:0
>> test.cc:22:0
>> test.cc:23:0
>> test.cc:24:0)stack";
EXPECT_EQ(tensorflow::dtensor::DTensorLocationToString(test_loc), stack);
}
TEST(DTensorLocationTest, HandlesNameLoc) {
mlir::MLIRContext ctx;
mlir::Location test_loc =
mlir::NameLoc::get(mlir::StringAttr::get(&ctx, "op@"),
mlir::FileLineColLoc::get(&ctx, "test.cc", 10, 20));
test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 21);
ASSERT_EQ(mlir::GetNameFromLoc(test_loc), "op");
ASSERT_TRUE(mlir::isa<mlir::CallSiteLoc>(test_loc));
auto callsite_loc = mlir::cast<mlir::CallSiteLoc>(test_loc);
mlir::Location caller_loc =
mlir::cast<mlir::CallSiteLoc>(test_loc).getCaller();
ASSERT_TRUE(mlir::isa<mlir::NameLoc>(caller_loc));
CheckFileLineColLocation(mlir::cast<mlir::NameLoc>(caller_loc).getChildLoc(),
10, 20);
mlir::Location callee_loc = callsite_loc.getCallee();
ASSERT_TRUE(mlir::isa<mlir::NameLoc>(callee_loc));
CheckFileLineColLocation(mlir::cast<mlir::NameLoc>(callee_loc).getChildLoc(),
21, 0);
constexpr char stack[] = R"stack(>> test.cc:10:20
>> test.cc:21:0)stack";
EXPECT_EQ(tensorflow::dtensor::DTensorLocationToString(test_loc), stack);
}
TEST(DTensorLocationTest, HandlesNameLocWithName) {
mlir::MLIRContext ctx;
mlir::Location test_loc =
mlir::NameLoc::get(mlir::StringAttr::get(&ctx, "op@"),
mlir::FileLineColLoc::get(&ctx, "test.cc", 10, 20));
test_loc =
tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 21, "nested");
EXPECT_EQ(mlir::GetNameFromLoc(test_loc), "op/nested");
constexpr char stack[] = R"stack(>> test.cc:10:20
>> test.cc:21:0)stack";
EXPECT_EQ(tensorflow::dtensor::DTensorLocationToString(test_loc), stack);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/dtensor/mlir/dtensor_location.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/dtensor/mlir/dtensor_location_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
39ede851-60b9-4b19-80df-cfd236d29c33 | cpp | google/tsl | fingerprint | tsl/platform/fingerprint.h | tsl/platform/fingerprint_test.cc | #ifndef TENSORFLOW_TSL_PLATFORM_FINGERPRINT_H_
#define TENSORFLOW_TSL_PLATFORM_FINGERPRINT_H_
#include "tsl/platform/platform.h"
#include "tsl/platform/stringpiece.h"
#include "tsl/platform/types.h"
#if TSL_IS_IN_OSS
#define USE_OSS_FARMHASH
#endif
#ifdef USE_OSS_FARMHASH
#include <farmhash.h>
#else
#include "util/hash/farmhash_fingerprint.h"
#endif
namespace tsl {
struct Fprint128 {
uint64_t low64;
uint64_t high64;
};
inline bool operator==(const Fprint128& lhs, const Fprint128& rhs) {
return lhs.low64 == rhs.low64 && lhs.high64 == rhs.high64;
}
struct Fprint128Hasher {
size_t operator()(const Fprint128& v) const {
return static_cast<size_t>(v.low64);
}
};
namespace internal {
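// 64-bit xor-shift mixing step used by FingerprintCat64 below.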
inline uint64_t ShiftMix(const uint64_t val) { return val ^ (val >> 47); }
}
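// Combines two 64-bit fingerprints into one. Not commutative: swapping fp1 and
// fp2 yields a different result in general.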
inline uint64_t FingerprintCat64(const uint64_t fp1, const uint64_t fp2) {
static const uint64_t kMul = 0xc6a4a7935bd1e995ULL;
uint64_t result = fp1 ^ kMul;
result ^= internal::ShiftMix(fp2 * kMul) * kMul;
result *= kMul;
result = internal::ShiftMix(result) * kMul;
result = internal::ShiftMix(result);
return result;
}
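// Fingerprint64/32/128 delegate to farmhash and are expected to be stable
// forever; the companion fingerprint_test.cc pins exact output values.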
inline uint64_t Fingerprint64(const absl::string_view s) {
#ifdef USE_OSS_FARMHASH
return ::util::Fingerprint64(s.data(), s.size());
#else
return farmhash::Fingerprint64(s.data(), s.size());
#endif
}
inline uint32_t Fingerprint32(const absl::string_view s) {
#ifdef USE_OSS_FARMHASH
return ::util::Fingerprint32(s.data(), s.size());
#else
return farmhash::Fingerprint32(s.data(), s.size());
#endif
}
inline Fprint128 Fingerprint128(const absl::string_view s) {
#ifdef USE_OSS_FARMHASH
const auto fingerprint = ::util::Fingerprint128(s.data(), s.size());
return {::util::Uint128Low64(fingerprint),
::util::Uint128High64(fingerprint)};
#else
const auto fingerprint = farmhash::Fingerprint128(s.data(), s.size());
return {absl::Uint128Low64(fingerprint), absl::Uint128High64(fingerprint)};
#endif
}
inline Fprint128 FingerprintCat128(const Fprint128& a, const Fprint128& b) {
return {FingerprintCat64(a.low64, b.low64),
FingerprintCat64(a.high64, b.high64)};
}
inline Fprint128 FingerprintCat128(const Fprint128& a, const uint64_t b) {
auto x = FingerprintCat64(a.low64, b);
return {x, FingerprintCat64(a.high64, x)};
}
}
#endif | #include "tsl/platform/fingerprint.h"
#include <unordered_set>
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace {
TEST(Fingerprint64, IsForeverFrozen) {
EXPECT_EQ(15404698994557526151ULL, Fingerprint64("Hello"));
EXPECT_EQ(18308117990299812472ULL, Fingerprint64("World"));
}
TEST(Fingerprint128, IsForeverFrozen) {
{
const Fprint128 fingerprint = Fingerprint128("Hello");
EXPECT_EQ(1163506517679092766ULL, fingerprint.low64);
EXPECT_EQ(10829806600034513965ULL, fingerprint.high64);
}
{
const Fprint128 fingerprint = Fingerprint128("World");
EXPECT_EQ(14404540403896557767ULL, fingerprint.low64);
EXPECT_EQ(4859093245152058524ULL, fingerprint.high64);
}
}
TEST(Fingerprint128, Fprint128Hasher) {
const std::unordered_set<Fprint128, Fprint128Hasher> map = {{1, 2}, {3, 4}};
}
TEST(FingerprintCat64, IsForeverFrozen) {
EXPECT_EQ(16877292868973613377ULL,
FingerprintCat64(Fingerprint64("Hello"), Fingerprint64("World")));
EXPECT_EQ(7158413233176775252ULL,
FingerprintCat64(Fingerprint64("World"), Fingerprint64("Hello")));
}
TEST(FingerprintCat64, Idempotence) {
const uint64_t orig =
FingerprintCat64(Fingerprint64("Hello"), Fingerprint64("World"));
EXPECT_EQ(orig,
FingerprintCat64(Fingerprint64("Hello"), Fingerprint64("World")));
EXPECT_NE(FingerprintCat64(Fingerprint64("Hello"), Fingerprint64("Hi")),
FingerprintCat64(Fingerprint64("Hello"), Fingerprint64("World")));
EXPECT_EQ(orig,
FingerprintCat64(Fingerprint64("Hello"), Fingerprint64("World")));
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/fingerprint.h | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/fingerprint_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
f5affd7b-6c08-4985-9501-fbeefc796907 | cpp | tensorflow/tensorflow | real_imag_expander | third_party/xla/xla/service/real_imag_expander.cc | third_party/xla/xla/service/real_imag_expander_test.cc | #include "xla/service/real_imag_expander.h"
#include "xla/literal_util.h"
namespace xla {
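// Matches real(x) and imag(x) whose operand is not complex-typed; only those
// instructions need expansion.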
bool RealImagExpander::InstructionMatchesPattern(HloInstruction* inst) {
return (inst->opcode() == HloOpcode::kReal ||
inst->opcode() == HloOpcode::kImag) &&
!ShapeUtil::ElementIsComplex(inst->operand(0)->shape());
}
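// For a non-complex operand, real(x) is x itself and imag(x) is a zero of the
// operand's element type broadcast to the output shape.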
absl::StatusOr<HloInstruction*> RealImagExpander::ExpandInstruction(
HloInstruction* inst) {
if (inst->opcode() == HloOpcode::kReal) {
return inst->mutable_operand(0);
} else {
HloComputation* comp = inst->parent();
auto zero = comp->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(inst->operand(0)->shape().element_type())));
zero = comp->AddInstruction(
HloInstruction::CreateBroadcast(inst->shape(), zero, {}));
return zero;
}
}
} | #include "xla/service/real_imag_expander.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
namespace xla {
namespace {
namespace m = match;
class RealImagExpanderTest : public HloTestBase {};
TEST_F(RealImagExpanderTest, RealWithNonComplexInput) {
const char* kModuleStr = R"(
HloModule real_float
ENTRY main {
input = f32[4] parameter(0)
ROOT real = real(input)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
RealImagExpander expander;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&expander, module.get()));
EXPECT_TRUE(result);
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Parameter(0)));
}
TEST_F(RealImagExpanderTest, ImagWithNonComplexInput) {
const char* kModuleStr = R"(
HloModule imag_float
ENTRY main {
input = f32[4,2,8] parameter(0)
ROOT imag = imag(input)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
RealImagExpander expander;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&expander, module.get()));
EXPECT_TRUE(result);
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Broadcast()));
XLA_VLOG_LINES(1, module->ToString());
}
TEST_F(RealImagExpanderTest, RealImagWithComplexInput) {
const char* kModuleStr = R"(
HloModule real_float
ENTRY main {
input = c64[4] parameter(0)
real = real(input)
imag = imag(input)
ROOT t = tuple(real, imag)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
RealImagExpander expander;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&expander, module.get()));
EXPECT_FALSE(result);
}
TEST_F(RealImagExpanderTest, MultipleImagWithNonComplexInput) {
const char* kModuleStr = R"(
HloModule imag_float
ENTRY main {
input = f32[4,2,8] parameter(0)
imag1 = imag(input)
ROOT imag2 = imag(imag1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
auto param = module->entry_computation()->parameter_instruction(0);
HloInstruction* imag1 =
module->entry_computation()->root_instruction()->mutable_operand(0);
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * new_imag,
MakeUnaryHlo(HloOpcode::kImag, param));
TF_ASSERT_OK(
module->entry_computation()->ReplaceInstruction(imag1, new_imag));
RealImagExpander expander;
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&expander, module.get()));
EXPECT_TRUE(result);
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Broadcast()));
XLA_VLOG_LINES(1, module->ToString());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/real_imag_expander.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/real_imag_expander_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
61d47aca-8f82-4f8a-9165-05f24949318b | cpp | google/arolla | tuple_expr_operator | arolla/expr/tuple_expr_operator.cc | arolla/expr/tuple_expr_operator_test.cc | #include "arolla/expr/tuple_expr_operator.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include "absl/base/no_destructor.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/qtype_utils.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
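// Returns the shared, lazily constructed core.make_tuple operator instance.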
ExprOperatorPtr MakeTupleOperator::Make() {
static const absl::NoDestructor<ExprOperatorPtr> result(
std::make_shared<MakeTupleOperator>());
return *result;
}
MakeTupleOperator::MakeTupleOperator()
: ExprOperatorWithFixedSignature(
"core.make_tuple", ExprOperatorSignature::MakeVariadicArgs(),
"Returns a tuple constructed from the given arguments.",
FingerprintHasher("::arolla::expr::MakeTupleOperator").Finish()) {}
ExprAttributes MakeTupleOperator::StaticInferAttributes(
absl::Span<const ExprAttributes> inputs) {
if (!HasAllAttrQTypes(inputs)) {
return ExprAttributes{};
}
return ExprAttributes(MakeTupleQType(GetAttrQTypes(inputs)));
}
absl::StatusOr<ExprAttributes> MakeTupleOperator::InferAttributes(
absl::Span<const ExprAttributes> inputs) const {
RETURN_IF_ERROR(ValidateOpInputsCount(inputs));
return StaticInferAttributes(inputs);
}
absl::StatusOr<ExprOperatorPtr> GetNthOperator::Make(int64_t index) {
if (index < 0) {
return absl::InvalidArgumentError(
absl::StrFormat("expected a non-negative index, got %d", index));
}
return std::make_shared<GetNthOperator>(index);
}
namespace {
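// Builds a docstring for get_nth[index], spelling out the first few ordinals.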
std::string GetNthOperatorDocstring(int64_t index) {
if (index == 0) {
return "Returns the first field of a compound value.";
} else if (index == 1) {
return "Returns the second field of a compound value.";
} else if (index == 2) {
return "Returns the third field of a compound value.";
} else {
return absl::StrFormat("Returns the %dth field of a compound value.",
index + 1);
}
}
}
GetNthOperator::GetNthOperator(int64_t index)
: ExprOperatorWithFixedSignature(
absl::StrFormat("get_nth[%d]", index),
ExprOperatorSignature{{"value"}}, GetNthOperatorDocstring(index),
FingerprintHasher("::arolla::expr::GetNthOperator")
.Combine(index)
.Finish()),
index_(index) {}
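// Type/value inference for get_nth[index]: requires a compound input qtype,
// range-checks the index, and forwards the literal field value when the input
// carries one.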
absl::StatusOr<ExprAttributes> GetNthOperator::StaticInferAttributes(
int64_t index, const ExprAttributes& input) {
if (!input.qtype()) {
return ExprAttributes{};
}
const auto& fields = input.qtype()->type_fields();
if (fields.empty() && !IsTupleQType(input.qtype())) {
return absl::InvalidArgumentError(absl::StrFormat(
"expected a compound type, got value: %s", input.qtype()->name()));
}
if (index < 0 || static_cast<size_t>(index) >= fields.size()) {
return absl::InvalidArgumentError(
absl::StrFormat("index out of range: n=%d, value.field_count=%d", index,
fields.size()));
}
if (!input.qvalue()) {
return ExprAttributes(fields[index].GetType());
}
return ExprAttributes(input.qvalue()->GetField(index));
}
absl::StatusOr<ExprAttributes> GetNthOperator::InferAttributes(
absl::Span<const ExprAttributes> inputs) const {
RETURN_IF_ERROR(ValidateOpInputsCount(inputs));
return StaticInferAttributes(index_, inputs[0]);
}
absl::string_view GetNthOperator::py_qvalue_specialization_key() const {
return "::arolla::expr::GetNthOperator";
}
} | #include "arolla/expr/tuple_expr_operator.h"
#include <cstdint>
#include <memory>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status_matchers.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/qtype/typed_value.h"
namespace arolla::expr {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::arolla::testing::InvokeExprOperator;
TEST(TupleExprOperatorTest, Basics) {
ASSERT_OK_AND_ASSIGN(auto tuple,
CallOp(MakeTupleOperator::Make(),
{Literal<float>(2.f), Literal<int64_t>(3)}));
ASSERT_OK_AND_ASSIGN(auto first,
CallOp(std::make_shared<GetNthOperator>(0), {tuple}));
ASSERT_OK_AND_ASSIGN(auto second,
CallOp(std::make_shared<GetNthOperator>(1), {tuple}));
EXPECT_EQ(first->qtype(), GetQType<float>());
EXPECT_EQ(second->qtype(), GetQType<int64_t>());
}
TEST(TupleExprOperatorTest, InvokeMakeTuple) {
ASSERT_OK_AND_ASSIGN(
auto tuple, InvokeExprOperator<TypedValue>(MakeTupleOperator::Make(), 2.f,
int64_t{3}));
EXPECT_EQ(tuple.GetType(),
MakeTupleQType({GetQType<float>(), GetQType<int64_t>()}));
EXPECT_EQ(tuple.GetFieldCount(), 2);
EXPECT_THAT(tuple.GetField(0).As<float>(), IsOkAndHolds(2.f));
EXPECT_THAT(tuple.GetField(1).As<int64_t>(), IsOkAndHolds(3));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/tuple_expr_operator.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/tuple_expr_operator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
d81c2368-f61b-45b4-9f04-9d2cbe7d1611 | cpp | tensorflow/tensorflow | gpu_cudamallocasync_allocator | third_party/xla/xla/stream_executor/gpu/gpu_cudamallocasync_allocator.cc | third_party/xla/xla/stream_executor/gpu/gpu_cudamallocasync_allocator_test.cc | #include "xla/stream_executor/gpu/gpu_cudamallocasync_allocator.h"
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/synchronization/mutex.h"
#include "third_party/gpus/cuda/include/cuda.h"
#include "xla/stream_executor/cuda/cuda_status.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/stream_executor/gpu/scoped_activate_context.h"
#include "xla/tsl/framework/allocator.h"
#include "xla/tsl/framework/device_id.h"
#include "xla/tsl/util/env_var.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace stream_executor {
struct GpuCudaMallocAsyncAllocator::CudaState {
CUstream cuda_stream{};
CUmemoryPool pool{};
};
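// Dumps a histogram of live allocation sizes and the pool's reserved/used
// memory counters; invoked from AllocateRaw when an allocation fails and
// stats are enabled.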
void GpuCudaMallocAsyncAllocator::PrintAllocatorStatisticsNoLock() {
std::map<size_t, int> size_map_histogram;
std::vector<std::string> ptr_size_string;
for (auto p : size_map_) {
if (VLOG_IS_ON(8)) {
ptr_size_string.push_back(
absl::StrCat("(", absl::Hex(p.first), ",", p.second) + ")");
}
size_map_histogram[p.second]++;
}
LOG(ERROR) << "Histogram of current allocation: (allocation_size_in_bytes, "
<< "nb_allocation_of_that_sizes), ...;";
for (auto p : size_map_histogram) {
LOG(ERROR) << p.first << ", " << p.second;
}
VLOG(8) << "\nThe sorted list of (ptr,size):";
VLOG(8) << absl::StrJoin(ptr_size_string, ",");
cuuint64_t mem_reserved_current;
if (auto result = cuMemPoolGetAttribute(cuda_state_->pool,
CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT,
&mem_reserved_current)) {
LOG(ERROR) << "Error while fetching extra cudaMallocAsync pool attribute: "
<< cuda::ToStatus(result);
}
cuuint64_t mem_used_current;
if (auto result = cuMemPoolGetAttribute(cuda_state_->pool,
CU_MEMPOOL_ATTR_USED_MEM_CURRENT,
&mem_used_current)) {
LOG(ERROR) << "Error while fetching extra cudaMallocAsync pool attribute: "
<< cuda::ToStatus(result);
}
cuuint64_t mem_reserved_high;
if (auto result = cuMemPoolGetAttribute(cuda_state_->pool,
CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH,
&mem_reserved_high)) {
LOG(ERROR) << "Error while fetching extra cudaMallocAsync pool attribute: "
<< cuda::ToStatus(result);
}
cuuint64_t mem_used_high;
if (auto result = cuMemPoolGetAttribute(
cuda_state_->pool, CU_MEMPOOL_ATTR_USED_MEM_HIGH, &mem_used_high)) {
LOG(ERROR) << "Error while fetching extra cudaMallocAsync pool attribute: "
<< cuda::ToStatus(result);
}
LOG(ERROR) << "CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT: "
<< mem_reserved_current;
LOG(ERROR) << "CU_MEMPOOL_ATTR_USED_MEM_CURRENT: " << mem_used_current;
LOG(ERROR) << "CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH: " << mem_reserved_high;
LOG(ERROR) << "CU_MEMPOOL_ATTR_USED_MEM_HIGH: " << mem_used_high;
}
std::atomic<int> GpuCudaMallocAsyncAllocator::number_instantiated_(0);
GpuCudaMallocAsyncAllocator::GpuCudaMallocAsyncAllocator(
tsl::PlatformDeviceId platform_device_id, bool create_new_pool,
size_t new_pool_size, bool reserve_memory, size_t reserve_memory_size,
bool sync_mode, bool compute_stats)
: cuda_state_{std::make_unique<CudaState>()},
name_(absl::StrCat("gpu_async_", platform_device_id.value())),
reserve_memory_(reserve_memory),
create_new_pool_(create_new_pool),
sync_mode_(sync_mode) {
++number_instantiated_;
stream_exec_ = GPUMachineManager()
->ExecutorForDevice(platform_device_id.value())
.value();
int driverVersion;
cuDriverGetVersion(&driverVersion);
VLOG(2) << "DRIVER VERSION: " << driverVersion;
if (driverVersion < 11020) {
LOG(FATAL)
<< "Disable cuda_malloc_async or update your CUDA driver to a version"
<< " compatible with CUDA 11.2 or higher."
<< " We detected a version compatible with: " << driverVersion;
}
if (platform_device_id.value() > 0 && driverVersion < 11030) {
CUcontext pctx;
if (auto result = cuDevicePrimaryCtxRetain(&pctx, 0))
LOG(FATAL)
<< "Failed to retain context: " << cuda::ToStatus(result);
}
gpu::ScopedActivateContext scoped_activation{stream_exec_};
if (auto status2 = cuDriverGetVersion(&driverVersion)) {
LOG(FATAL)
<< "Error while fetching driver version: " << cuda::ToStatus(status2);
}
int cuda_malloc_async_supported;
if (auto status =
cuDeviceGetAttribute(&cuda_malloc_async_supported,
CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED,
platform_device_id.value())) {
LOG(FATAL)
<< "On device: " << platform_device_id.value()
<< " Current driver: " << driverVersion
<< ". Failed to get device attribute : " << cuda::ToStatus(status);
}
if (!cuda_malloc_async_supported)
LOG(FATAL)
<< "TF_GPU_ALLOCATOR=cuda_malloc_async isn't currently supported on "
<< "GPU id " << platform_device_id.value() << ":"
<< " Possible causes: device not supported (request SM60+), driver too "
"old, "
<< " OS not supported, CUDA version too old(request CUDA11.2+).";
size_t pool_size;
if (create_new_pool_) {
pool_size = new_pool_size;
CUmemPoolProps pool_props;
memset(reinterpret_cast<void*>(&pool_props), 0, sizeof(pool_props));
pool_props.allocType = CU_MEM_ALLOCATION_TYPE_PINNED;
pool_props.handleTypes = CU_MEM_HANDLE_TYPE_NONE;
pool_props.location.id = platform_device_id.value();
pool_props.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
#if CUDA_VERSION >= 12030
pool_props.maxSize = new_pool_size;
#endif
if (auto status = cuMemPoolCreate(&cuda_state_->pool, &pool_props))
      LOG(FATAL) << "Failed to create CUDA pool: " << cuda::ToStatus(status);
} else {
pool_size = reserve_memory_size;
if (auto status = cuDeviceGetDefaultMemPool(&cuda_state_->pool,
platform_device_id.value()))
      LOG(FATAL) << "Failed to get default CUDA pool: "
                 << cuda::ToStatus(status);
VLOG(2) << "using default memory pool " << cuda_state_->pool;
}
VLOG(1) << Name() << " CudaMallocAsync initialized on platform: "
<< platform_device_id.value() << " with pool size of: " << pool_size
<< " this ptr: " << this;
uint64_t release_threshold_64 = reserve_memory_size;
if (auto status = cuMemPoolSetAttribute(cuda_state_->pool,
CU_MEMPOOL_ATTR_RELEASE_THRESHOLD,
&release_threshold_64))
    LOG(FATAL) << "Failed to set CUDA pool attribute: "
               << cuda::ToStatus(status);
if (compute_stats) {
stats_ = std::make_unique<tsl::AllocatorStats>();
stats_->bytes_limit = static_cast<int64_t>(pool_size);
}
bool deterministic = false;
TF_CHECK_OK(tsl::ReadBoolFromEnvVar("TF_DETERMINISTIC_ALLOCATOR",
false, &deterministic));
if (deterministic) {
int disable = 0;
if (auto status = cuMemPoolSetAttribute(
cuda_state_->pool, CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC,
&disable)) {
      LOG(FATAL) << "Failed to set CUDA pool attribute: "
                 << cuda::ToStatus(status);
}
if (auto status = cuMemPoolSetAttribute(
cuda_state_->pool,
CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES, &disable)) {
      LOG(FATAL) << "Failed to set CUDA pool attribute: "
                 << cuda::ToStatus(status);
}
}
static auto* all_pools_ = new std::vector<CUmemoryPool>();
static auto* all_ids_ = new std::vector<tsl::PlatformDeviceId>();
DCHECK(all_pools_->size() == all_ids_->size());
for (auto pool_item_ : *all_pools_) {
if (pool_item_ == cuda_state_->pool) {
VLOG(2) << Name()
<< " GpuCudaMallocAsyncAllocator pool already initialized. "
"PoolSize "
<< pool_size;
return;
}
}
for (int i = 0; i < all_pools_->size(); ++i) {
CUmemAccessDesc map;
map.flags = CU_MEM_ACCESS_FLAGS_PROT_READWRITE;
map.location.id = (*all_ids_)[i].value();
map.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
VLOG(2) << "Setting access of the current pool to "
<< " location id: " << map.location.id;
int canAccessPeer;
if (auto status = cuDeviceCanAccessPeer(
&canAccessPeer, platform_device_id.value(), map.location.id)) {
cuda_state_->pool = nullptr;
LOG(FATAL)
<< "cuDeviceCanAccessPeer failed to know if GPU id "
<< map.location.id << " can access GPU id "
<< platform_device_id.value() << ": " << cuda::ToStatus(status);
}
if (canAccessPeer == 1) {
if (auto status = cuMemPoolSetAccess(cuda_state_->pool, &map, 1)) {
cuda_state_->pool = nullptr;
LOG(FATAL)
<< "Error when setting access to the pool id: " << i
<< " location id: " << map.location.id
<< " error: " << cuda::ToStatus(status);
}
}
map.location.id = platform_device_id.value();
int previous_pool_id = (*all_ids_)[i].value();
VLOG(2) << "Set access to the pool id: " << previous_pool_id
<< " location id: " << map.location.id;
if (auto status = cuDeviceCanAccessPeer(&canAccessPeer, previous_pool_id,
platform_device_id.value())) {
cuda_state_->pool = nullptr;
LOG(FATAL)
<< "cuDeviceCanAccessPeer failed: " << cuda::ToStatus(status);
}
if (canAccessPeer == 1) {
if (auto status = cuMemPoolSetAccess((*all_pools_)[i], &map, 1)) {
cuda_state_->pool = nullptr;
LOG(FATAL)
<< "Error when setting access to the pool id: " << previous_pool_id
<< " location id: " << map.location.id
<< " error: " << cuda::ToStatus(status);
}
}
}
all_pools_->push_back(cuda_state_->pool);
all_ids_->push_back(platform_device_id);
VLOG(2) << Name() << " GpuCudaMallocAsyncAllocator PoolSize " << pool_size;
}
GpuCudaMallocAsyncAllocator::GpuCudaMallocAsyncAllocator(
tsl::PlatformDeviceId platform_device_id, size_t release_threshold,
bool reserve_memory, bool compute_stats)
: GpuCudaMallocAsyncAllocator(platform_device_id, false, 0, reserve_memory,
release_threshold, false, compute_stats) {}
GpuCudaMallocAsyncAllocator::~GpuCudaMallocAsyncAllocator() {
if (create_new_pool_) {
VLOG(2) << "Delete memory pool "
<< reinterpret_cast<void*>(cuda_state_->pool);
if (auto status = cuMemPoolDestroy(cuda_state_->pool))
LOG(FATAL) << "Failed to destroy memory pool:" << cuda::ToStatus(status);
}
}
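// Allocates from the CUDA memory pool on the bound stream. On
// CUDA_ERROR_OUT_OF_MEMORY the stream is synchronized once and the allocation
// retried, giving pending frees a chance to be reused.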
void* GpuCudaMallocAsyncAllocator::AllocateRaw(size_t alignment,
size_t num_bytes) {
CHECK(cuda_state_->cuda_stream != nullptr)
<< "A stream must be added to the GpuCudaMallocAsync allocator";
if (cuda_state_->pool == nullptr) {
LOG(FATAL)
<< "The instantiation of GpuCudaMallocAsyncAllocator failed."
<< " See previous errors.";
}
std::optional<absl::MutexLock> lock;
if (stats_) {
lock.emplace(&mutex_);
}
gpu::ScopedActivateContext scoped_activation{stream_exec_};
void* ptr = nullptr;
auto result =
cuMemAllocFromPoolAsync(reinterpret_cast<CUdeviceptr*>(&ptr), num_bytes,
cuda_state_->pool, cuda_state_->cuda_stream);
if (result == CUDA_ERROR_OUT_OF_MEMORY) {
cuStreamSynchronize(cuda_state_->cuda_stream);
result =
cuMemAllocFromPoolAsync(reinterpret_cast<CUdeviceptr*>(&ptr), num_bytes,
cuda_state_->pool, cuda_state_->cuda_stream);
}
if (result) {
size_t free, total;
cuMemGetInfo(&free, &total);
LOG(ERROR) << Name() << " cuMemAllocAsync failed to allocate " << num_bytes
<< " bytes: " << cuda::ToStatus(result)
<< "\n Reported by CUDA: Free memory/Total memory: " << free
<< "/" << total;
if (stats_) {
LOG(ERROR) << "Stats: " << stats_->DebugString();
PrintAllocatorStatisticsNoLock();
}
return nullptr;
}
if (sync_mode_) {
cuStreamSynchronize(cuda_state_->cuda_stream);
}
if (stats_) {
++(stats_->num_allocs);
stats_->bytes_in_use += num_bytes;
if (stats_->bytes_in_use > stats_->peak_bytes_in_use) {
VLOG(9) << "New Peak memory usage of " << stats_->bytes_in_use
<< " bytes.";
}
stats_->peak_bytes_in_use =
std::max(stats_->peak_bytes_in_use, stats_->bytes_in_use);
stats_->largest_alloc_size =
std::max<std::size_t>(stats_->largest_alloc_size, num_bytes);
bool ptr_inserted = size_map_.emplace(ptr, num_bytes).second;
DCHECK(ptr_inserted);
}
VLOG(10) << Name() << " Allocated " << num_bytes << " at " << ptr;
return ptr;
}
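// Frees asynchronously on the bound stream. CUDA_ERROR_DEINITIALIZED (seen
// during program shutdown) is ignored; other errors are logged together with
// current memory usage.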
void GpuCudaMallocAsyncAllocator::DeallocateRaw(void* ptr) {
if (ptr == nullptr) return;
std::optional<absl::MutexLock> lock;
if (stats_) {
lock.emplace(&mutex_);
}
if (auto result = cuMemFreeAsync(reinterpret_cast<const CUdeviceptr&>(ptr),
cuda_state_->cuda_stream)) {
if (result == CUDA_ERROR_DEINITIALIZED) {
VLOG(1) << "Ignoring CUDA error: " << cuda::ToStatus(result);
} else {
size_t free, total;
gpu::ScopedActivateContext scoped_activation{stream_exec_};
cuMemGetInfo(&free, &total);
LOG(ERROR) << "cudaFreeAsync failed to free " << ptr << ": "
<< cuda::ToStatus(result)
<< "\n Free memory/Total memory: " << free << "/" << total;
if (stats_) {
LOG(ERROR) << "Stats: " << stats_->DebugString();
}
}
}
if (sync_mode_) {
cuStreamSynchronize(cuda_state_->cuda_stream);
}
if (stats_) {
DCHECK(size_map_.contains(ptr));
size_t size = size_map_[ptr];
stats_->bytes_in_use -= size;
size_map_.erase(ptr);
}
VLOG(10) << Name() << " Freed ptr: " << ptr;
}
bool GpuCudaMallocAsyncAllocator::TracksAllocationSizes() const {
return static_cast<bool>(stats_);
}
size_t GpuCudaMallocAsyncAllocator::RequestedSize(const void* ptr) const {
if (!stats_ || !ptr) return 0;
absl::MutexLock l(&mutex_);
return size_map_.at(ptr);
}
size_t GpuCudaMallocAsyncAllocator::AllocatedSize(const void* ptr) const {
if (!stats_ || !ptr) return 0;
absl::MutexLock l(&mutex_);
return size_map_.at(ptr);
}
std::optional<tsl::AllocatorStats> GpuCudaMallocAsyncAllocator::GetStats() {
if (!stats_) return std::nullopt;
absl::MutexLock l(&mutex_);
return *stats_;
}
bool GpuCudaMallocAsyncAllocator::ClearStats() {
if (!stats_) return false;
absl::MutexLock l(&mutex_);
stats_->num_allocs = 0;
stats_->peak_bytes_in_use = stats_->bytes_in_use;
stats_->largest_alloc_size = 0;
return true;
}
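// Binds the allocator to its stream (setting a different stream later is
// fatal) and, when configured, reserves pool memory by allocating and
// immediately freeing the preallocation size.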
void GpuCudaMallocAsyncAllocator::SetStreamAndPreallocateMemory(void* stream) {
auto new_cuda_stream = static_cast<CUstream>(stream);
if (cuda_state_->cuda_stream != nullptr &&
new_cuda_stream != cuda_state_->cuda_stream) {
    LOG(FATAL) << "Trying to set the stream twice. This isn't supported.";
}
uint64_t pool_size_64 = 0;
if (auto status = cuMemPoolGetAttribute(cuda_state_->pool,
CU_MEMPOOL_ATTR_RELEASE_THRESHOLD,
&pool_size_64)) {
    LOG(FATAL) << "Failed to get CUDA pool attribute: "
               << cuda::ToStatus(status);
}
cuda_state_->cuda_stream = new_cuda_stream;
int64_t prealloc_size = 0;
TF_CHECK_OK(tsl::ReadInt64FromEnvVar(
"TF_CUDA_MALLOC_ASYNC_SUPPORTED_PREALLOC", 0, &prealloc_size));
if (prealloc_size == -1) {
prealloc_size = pool_size_64;
} else if (reserve_memory_) {
prealloc_size = pool_size_64;
}
if (prealloc_size != 0) {
void* ptr = AllocateRaw(0, prealloc_size);
DeallocateRaw(ptr);
VLOG(2) << Name() << " GpuCudaMallocAsyncAllocator reserved the pool for "
<< prealloc_size << " bytes" << ". First ptr: " << ptr;
ClearStats();
}
}
} | #include "xla/stream_executor/gpu/gpu_cudamallocasync_allocator.h"
#include <cstdint>
#include <memory>
#include <string>
#include "absl/log/check.h"
#include "absl/strings/ascii.h"
#include "xla/service/platform_util.h"
#include "xla/stream_executor/gpu/gpu_stream.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/device_id.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace se = stream_executor;
namespace {
static se::StreamExecutor* GpuExecutor() {
auto name = absl::AsciiStrToUpper(
xla::PlatformUtil::CanonicalPlatformName("gpu").value());
auto* platform = se::PlatformManager::PlatformWithName(name).value();
return platform->ExecutorForDevice(0).value();
}
}
namespace stream_executor {
TEST(GpuCudaMallocAsyncAllocator, TwoAllocatorsShareDefaultPool) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream1, executor->CreateStream());
auto allocator1 = GpuCudaMallocAsyncAllocator(
tsl::PlatformDeviceId(executor->device_ordinal()),
2048,
true,
true);
allocator1.SetStreamAndPreallocateMemory(
se::gpu::AsGpuStreamValue(stream1.get()));
TF_ASSERT_OK_AND_ASSIGN(auto stream2, executor->CreateStream());
auto allocator2 = GpuCudaMallocAsyncAllocator(
tsl::PlatformDeviceId(executor->device_ordinal()),
2048,
true,
true);
allocator2.SetStreamAndPreallocateMemory(
se::gpu::AsGpuStreamValue(stream2.get()));
void* addr1 = allocator1.AllocateRaw(128, 127);
void* addr2 = allocator2.AllocateRaw(128, 129);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr1) & 127), 0);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr2) & 127), 0);
allocator1.DeallocateRaw(addr1);
allocator2.DeallocateRaw(addr2);
EXPECT_TRUE(stream1->ok());
EXPECT_TRUE(stream2->ok());
}
TEST(GpuCudaMallocAsyncAllocator, AddressAlignedDefaultPool) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
auto allocator = GpuCudaMallocAsyncAllocator(
tsl::PlatformDeviceId(executor->device_ordinal()),
2048,
true,
true);
allocator.SetStreamAndPreallocateMemory(
se::gpu::AsGpuStreamValue(stream.get()));
void* addr1 = allocator.AllocateRaw(128, 127);
void* addr2 = allocator.AllocateRaw(128, 129);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr1) & 127), 0);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr2) & 127), 0);
allocator.DeallocateRaw(addr1);
allocator.DeallocateRaw(addr2);
EXPECT_TRUE(stream->ok());
}
TEST(GpuCudaMallocAsyncAllocator, AddressAlignedNewPool) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
auto allocator = GpuCudaMallocAsyncAllocator(
tsl::PlatformDeviceId(executor->device_ordinal()),
true,
2048,
true,
0,
false,
false);
allocator.SetStreamAndPreallocateMemory(
se::gpu::AsGpuStreamValue(stream.get()));
void* addr1 = allocator.AllocateRaw(128, 127);
void* addr2 = allocator.AllocateRaw(128, 129);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr1) & 127), 0);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr2) & 127), 0);
allocator.DeallocateRaw(addr1);
allocator.DeallocateRaw(addr2);
EXPECT_TRUE(stream->ok());
}
TEST(GpuCudaMallocAsyncAllocator, SyncAddressAlignedNewPool) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
auto allocator = GpuCudaMallocAsyncAllocator(
tsl::PlatformDeviceId(executor->device_ordinal()),
true,
2048,
true,
0,
true,
true);
allocator.SetStreamAndPreallocateMemory(
se::gpu::AsGpuStreamValue(stream.get()));
void* addr1 = allocator.AllocateRaw(128, 127);
void* addr2 = allocator.AllocateRaw(128, 129);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr1) & 127), 0);
CHECK_EQ((reinterpret_cast<uintptr_t>(addr2) & 127), 0);
allocator.DeallocateRaw(addr1);
allocator.DeallocateRaw(addr2);
EXPECT_TRUE(stream->ok());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/gpu_cudamallocasync_allocator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/gpu_cudamallocasync_allocator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5b2ce1fc-0d45-460e-9387-c716d81999e0 | cpp | google/quiche | quic_simple_server_stream | quiche/quic/tools/quic_simple_server_stream.cc | quiche/quic/tools/quic_simple_server_stream_test.cc | #include "quiche/quic/tools/quic_simple_server_stream.h"
#include <algorithm>
#include <cstdint>
#include <list>
#include <optional>
#include <string>
#include <utility>
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/http2/core/spdy_protocol.h"
#include "quiche/quic/core/http/quic_spdy_stream.h"
#include "quiche/quic/core/http/spdy_utils.h"
#include "quiche/quic/core/http/web_transport_http3.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/tools/quic_simple_server_session.h"
using quiche::HttpHeaderBlock;
namespace quic {
QuicSimpleServerStream::QuicSimpleServerStream(
QuicStreamId id, QuicSpdySession* session, StreamType type,
QuicSimpleServerBackend* quic_simple_server_backend)
: QuicSpdyServerStreamBase(id, session, type),
content_length_(-1),
generate_bytes_length_(0),
quic_simple_server_backend_(quic_simple_server_backend) {
QUICHE_DCHECK(quic_simple_server_backend_);
}
QuicSimpleServerStream::QuicSimpleServerStream(
PendingStream* pending, QuicSpdySession* session,
QuicSimpleServerBackend* quic_simple_server_backend)
: QuicSpdyServerStreamBase(pending, session),
content_length_(-1),
generate_bytes_length_(0),
quic_simple_server_backend_(quic_simple_server_backend) {
QUICHE_DCHECK(quic_simple_server_backend_);
}
QuicSimpleServerStream::~QuicSimpleServerStream() {
quic_simple_server_backend_->CloseBackendResponseStream(this);
}
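// Copies and validates the request headers. CONNECT requests (including
// WebTransport) are handed to the backend as soon as headers arrive, without
// waiting for the FIN.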
void QuicSimpleServerStream::OnInitialHeadersComplete(
bool fin, size_t frame_len, const QuicHeaderList& header_list) {
QuicSpdyStream::OnInitialHeadersComplete(fin, frame_len, header_list);
if (!response_sent_ &&
!SpdyUtils::CopyAndValidateHeaders(header_list, &content_length_,
&request_headers_)) {
QUIC_DVLOG(1) << "Invalid headers";
SendErrorResponse();
}
ConsumeHeaderList();
if (!fin && !response_sent_ && IsConnectRequest()) {
if (quic_simple_server_backend_ == nullptr) {
QUIC_DVLOG(1) << "Backend is missing on CONNECT headers.";
SendErrorResponse();
return;
}
if (web_transport() != nullptr) {
QuicSimpleServerBackend::WebTransportResponse response =
quic_simple_server_backend_->ProcessWebTransportRequest(
request_headers_, web_transport());
if (response.response_headers[":status"] == "200") {
WriteHeaders(std::move(response.response_headers), false, nullptr);
if (response.visitor != nullptr) {
web_transport()->SetVisitor(std::move(response.visitor));
}
web_transport()->HeadersReceived(request_headers_);
} else {
WriteHeaders(std::move(response.response_headers), true, nullptr);
}
return;
}
quic_simple_server_backend_->HandleConnectHeaders(request_headers_,
this);
}
}
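// Drains readable body bytes into body_, rejecting bodies larger than the
// declared content-length. Once the FIN has been read, dispatches either the
// CONNECT data path or a regular response.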
void QuicSimpleServerStream::OnBodyAvailable() {
while (HasBytesToRead()) {
struct iovec iov;
if (GetReadableRegions(&iov, 1) == 0) {
break;
}
QUIC_DVLOG(1) << "Stream " << id() << " processed " << iov.iov_len
<< " bytes.";
body_.append(static_cast<char*>(iov.iov_base), iov.iov_len);
if (content_length_ >= 0 &&
body_.size() > static_cast<uint64_t>(content_length_)) {
QUIC_DVLOG(1) << "Body size (" << body_.size() << ") > content length ("
<< content_length_ << ").";
SendErrorResponse();
return;
}
MarkConsumed(iov.iov_len);
}
if (!sequencer()->IsClosed()) {
if (IsConnectRequest()) {
HandleRequestConnectData(false);
}
sequencer()->SetUnblocked();
return;
}
OnFinRead();
if (write_side_closed() || fin_buffered()) {
return;
}
if (IsConnectRequest()) {
HandleRequestConnectData(true);
} else {
SendResponse();
}
}
void QuicSimpleServerStream::HandleRequestConnectData(bool fin_received) {
QUICHE_DCHECK(IsConnectRequest());
if (quic_simple_server_backend_ == nullptr) {
QUIC_DVLOG(1) << "Backend is missing on CONNECT data.";
ResetWriteSide(
QuicResetStreamError::FromInternal(QUIC_STREAM_CONNECT_ERROR));
return;
}
std::string data = std::move(body_);
body_.clear();
quic_simple_server_backend_->HandleConnectData(data,
fin_received,
this);
}
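// Sanity-checks the request (headers present, content-length consistent with
// the body, :authority and :path set) before fetching a response from the
// backend.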
void QuicSimpleServerStream::SendResponse() {
QUICHE_DCHECK(!IsConnectRequest());
if (request_headers_.empty()) {
QUIC_DVLOG(1) << "Request headers empty.";
SendErrorResponse();
return;
}
if (content_length_ > 0 &&
static_cast<uint64_t>(content_length_) != body_.size()) {
QUIC_DVLOG(1) << "Content length (" << content_length_ << ") != body size ("
<< body_.size() << ").";
SendErrorResponse();
return;
}
if (!request_headers_.contains(":authority")) {
QUIC_DVLOG(1) << "Request headers do not contain :authority.";
SendErrorResponse();
return;
}
if (!request_headers_.contains(":path")) {
QUIC_DVLOG(1) << "Request headers do not contain :path.";
SendErrorResponse();
return;
}
if (quic_simple_server_backend_ == nullptr) {
QUIC_DVLOG(1) << "Backend is missing in SendResponse().";
SendErrorResponse();
return;
}
if (web_transport() != nullptr) {
QuicSimpleServerBackend::WebTransportResponse response =
quic_simple_server_backend_->ProcessWebTransportRequest(
request_headers_, web_transport());
if (response.response_headers[":status"] == "200") {
WriteHeaders(std::move(response.response_headers), false, nullptr);
if (response.visitor != nullptr) {
web_transport()->SetVisitor(std::move(response.visitor));
}
web_transport()->HeadersReceived(request_headers_);
} else {
WriteHeaders(std::move(response.response_headers), true, nullptr);
}
return;
}
quic_simple_server_backend_->FetchResponseFromBackend(request_headers_, body_,
this);
}
QuicConnectionId QuicSimpleServerStream::connection_id() const {
return spdy_session()->connection_id();
}
QuicStreamId QuicSimpleServerStream::stream_id() const { return id(); }
std::string QuicSimpleServerStream::peer_host() const {
return spdy_session()->peer_address().host().ToString();
}
QuicSpdyStream* QuicSimpleServerStream::GetStream() { return this; }
namespace {
class DelayedResponseAlarm : public QuicAlarm::DelegateWithContext {
public:
DelayedResponseAlarm(QuicSimpleServerStream* stream,
const QuicBackendResponse* response)
: QuicAlarm::DelegateWithContext(
stream->spdy_session()->connection()->context()),
stream_(stream),
        response_(response) {}
~DelayedResponseAlarm() override = default;
void OnAlarm() override { stream_->Respond(response_); }
private:
QuicSimpleServerStream* stream_;
const QuicBackendResponse* response_;
};
}
void QuicSimpleServerStream::OnResponseBackendComplete(
const QuicBackendResponse* response) {
if (response == nullptr) {
QUIC_DVLOG(1) << "Response not found in cache.";
SendNotFoundResponse();
return;
}
auto delay = response->delay();
if (delay.IsZero()) {
Respond(response);
return;
}
auto* connection = session()->connection();
delayed_response_alarm_.reset(connection->alarm_factory()->CreateAlarm(
new DelayedResponseAlarm(this, response)));
delayed_response_alarm_->Set(connection->clock()->Now() + delay);
}
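// Writes the backend response: any Early Hints first, then the special
// response types (close connection, ignore request, backend error,
// incomplete, generated bytes), and otherwise the normal
// headers/body/trailers path.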
void QuicSimpleServerStream::Respond(const QuicBackendResponse* response) {
for (const auto& headers : response->early_hints()) {
QUIC_DVLOG(1) << "Stream " << id() << " sending an Early Hints response: "
<< headers.DebugString();
WriteHeaders(headers.Clone(), false, nullptr);
}
if (response->response_type() == QuicBackendResponse::CLOSE_CONNECTION) {
QUIC_DVLOG(1) << "Special response: closing connection.";
OnUnrecoverableError(QUIC_NO_ERROR, "Toy server forcing close");
return;
}
if (response->response_type() == QuicBackendResponse::IGNORE_REQUEST) {
QUIC_DVLOG(1) << "Special response: ignoring request.";
return;
}
if (response->response_type() == QuicBackendResponse::BACKEND_ERR_RESPONSE) {
QUIC_DVLOG(1) << "Quic Proxy: Backend connection error.";
SendErrorResponse(502);
return;
}
std::string request_url = request_headers_[":authority"].as_string() +
request_headers_[":path"].as_string();
int response_code;
const HttpHeaderBlock& response_headers = response->headers();
if (!ParseHeaderStatusCode(response_headers, &response_code)) {
auto status = response_headers.find(":status");
if (status == response_headers.end()) {
QUIC_LOG(WARNING)
<< ":status not present in response from cache for request "
<< request_url;
} else {
QUIC_LOG(WARNING) << "Illegal (non-integer) response :status from cache: "
<< status->second << " for request " << request_url;
}
SendErrorResponse();
return;
}
if (response->response_type() == QuicBackendResponse::INCOMPLETE_RESPONSE) {
QUIC_DVLOG(1)
<< "Stream " << id()
<< " sending an incomplete response, i.e. no trailer, no fin.";
SendIncompleteResponse(response->headers().Clone(), response->body());
return;
}
if (response->response_type() == QuicBackendResponse::GENERATE_BYTES) {
QUIC_DVLOG(1) << "Stream " << id() << " sending a generate bytes response.";
std::string path = request_headers_[":path"].as_string().substr(1);
if (!absl::SimpleAtoi(path, &generate_bytes_length_)) {
QUIC_LOG(ERROR) << "Path is not a number.";
SendNotFoundResponse();
return;
}
HttpHeaderBlock headers = response->headers().Clone();
headers["content-length"] = absl::StrCat(generate_bytes_length_);
WriteHeaders(std::move(headers), false, nullptr);
QUICHE_DCHECK(!response_sent_);
response_sent_ = true;
WriteGeneratedBytes();
return;
}
QUIC_DVLOG(1) << "Stream " << id() << " sending response.";
SendHeadersAndBodyAndTrailers(response->headers().Clone(), response->body(),
response->trailers().Clone());
}
void QuicSimpleServerStream::SendStreamData(absl::string_view data,
bool close_stream) {
QUICHE_DCHECK(!data.empty() || close_stream);
if (close_stream) {
SendHeadersAndBodyAndTrailers(
std::nullopt, data,
quiche::HttpHeaderBlock());
} else {
SendIncompleteResponse(std::nullopt, data);
}
}
void QuicSimpleServerStream::TerminateStreamWithError(
QuicResetStreamError error) {
QUIC_DVLOG(1) << "Stream " << id() << " abruptly terminating with error "
<< error.internal_code();
ResetWriteSide(error);
}
void QuicSimpleServerStream::OnCanWrite() {
QuicSpdyStream::OnCanWrite();
WriteGeneratedBytes();
}
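// Emits 'a'-filled chunks of up to 1024 bytes until generate_bytes_length_
// reaches zero, setting FIN on the final chunk; resumed from OnCanWrite.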
void QuicSimpleServerStream::WriteGeneratedBytes() {
static size_t kChunkSize = 1024;
while (!HasBufferedData() && generate_bytes_length_ > 0) {
size_t len = std::min<size_t>(kChunkSize, generate_bytes_length_);
std::string data(len, 'a');
generate_bytes_length_ -= len;
bool fin = generate_bytes_length_ == 0;
WriteOrBufferBody(data, fin);
}
}
void QuicSimpleServerStream::SendNotFoundResponse() {
QUIC_DVLOG(1) << "Stream " << id() << " sending not found response.";
HttpHeaderBlock headers;
headers[":status"] = "404";
headers["content-length"] = absl::StrCat(strlen(kNotFoundResponseBody));
SendHeadersAndBody(std::move(headers), kNotFoundResponseBody);
}
void QuicSimpleServerStream::SendErrorResponse() { SendErrorResponse(0); }
void QuicSimpleServerStream::SendErrorResponse(int resp_code) {
QUIC_DVLOG(1) << "Stream " << id() << " sending error response.";
if (!reading_stopped()) {
StopReading();
}
HttpHeaderBlock headers;
if (resp_code <= 0) {
headers[":status"] = "500";
} else {
headers[":status"] = absl::StrCat(resp_code);
}
headers["content-length"] = absl::StrCat(strlen(kErrorResponseBody));
SendHeadersAndBody(std::move(headers), kErrorResponseBody);
}
void QuicSimpleServerStream::SendIncompleteResponse(
std::optional<HttpHeaderBlock> response_headers, absl::string_view body) {
QUICHE_DCHECK_NE(response_headers.has_value(), response_sent_);
if (response_headers.has_value()) {
QUIC_DLOG(INFO) << "Stream " << id() << " writing headers (fin = false) : "
<< response_headers.value().DebugString();
int response_code;
if (!ParseHeaderStatusCode(*response_headers, &response_code) ||
response_code != 100) {
response_sent_ = true;
}
WriteHeaders(std::move(response_headers).value(), false, nullptr);
}
QUIC_DLOG(INFO) << "Stream " << id()
<< " writing body (fin = false) with size: " << body.size();
if (!body.empty()) {
WriteOrBufferBody(body, false);
}
}
void QuicSimpleServerStream::SendHeadersAndBody(
HttpHeaderBlock response_headers, absl::string_view body) {
SendHeadersAndBodyAndTrailers(std::move(response_headers), body,
HttpHeaderBlock());
}
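// Writes headers (if not already sent), body, and trailers, placing the FIN
// on the last piece actually written.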
void QuicSimpleServerStream::SendHeadersAndBodyAndTrailers(
std::optional<HttpHeaderBlock> response_headers, absl::string_view body,
HttpHeaderBlock response_trailers) {
QUICHE_DCHECK_NE(response_headers.has_value(), response_sent_);
if (response_headers.has_value()) {
bool send_fin = (body.empty() && response_trailers.empty());
QUIC_DLOG(INFO) << "Stream " << id()
<< " writing headers (fin = " << send_fin
<< ") : " << response_headers.value().DebugString();
WriteHeaders(std::move(response_headers).value(), send_fin, nullptr);
response_sent_ = true;
if (send_fin) {
return;
}
}
bool send_fin = response_trailers.empty();
QUIC_DLOG(INFO) << "Stream " << id() << " writing body (fin = " << send_fin
<< ") with size: " << body.size();
if (!body.empty() || send_fin) {
WriteOrBufferBody(body, send_fin);
}
if (send_fin) {
return;
}
QUIC_DLOG(INFO) << "Stream " << id() << " writing trailers (fin = true): "
<< response_trailers.DebugString();
WriteTrailers(std::move(response_trailers), nullptr);
}
bool QuicSimpleServerStream::IsConnectRequest() const {
auto method_it = request_headers_.find(":method");
return method_it != request_headers_.end() && method_it->second == "CONNECT";
}
void QuicSimpleServerStream::OnInvalidHeaders() {
QUIC_DVLOG(1) << "Invalid headers";
SendErrorResponse(400);
}
const char* const QuicSimpleServerStream::kErrorResponseBody = "bad";
const char* const QuicSimpleServerStream::kNotFoundResponseBody =
"file not found";
} | #include "quiche/quic/tools/quic_simple_server_stream.h"
#include <list>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/macros.h"
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/null_encrypter.h"
#include "quiche/quic/core/http/http_encoder.h"
#include "quiche/quic/core/http/spdy_utils.h"
#include "quiche/quic/core/quic_alarm_factory.h"
#include "quiche/quic/core/quic_default_clock.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/crypto_test_utils.h"
#include "quiche/quic/test_tools/quic_config_peer.h"
#include "quiche/quic/test_tools/quic_connection_peer.h"
#include "quiche/quic/test_tools/quic_session_peer.h"
#include "quiche/quic/test_tools/quic_spdy_session_peer.h"
#include "quiche/quic/test_tools/quic_stream_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/quic/test_tools/simulator/simulator.h"
#include "quiche/quic/tools/quic_backend_response.h"
#include "quiche/quic/tools/quic_memory_cache_backend.h"
#include "quiche/quic/tools/quic_simple_server_backend.h"
#include "quiche/quic/tools/quic_simple_server_session.h"
#include "quiche/common/simple_buffer_allocator.h"
using testing::_;
using testing::AnyNumber;
using testing::InSequence;
using testing::Invoke;
using testing::StrictMock;
namespace quic {
namespace test {
const size_t kFakeFrameLen = 60;
const size_t kErrorLength = strlen(QuicSimpleServerStream::kErrorResponseBody);
const size_t kDataFrameHeaderLength = 2;
class TestStream : public QuicSimpleServerStream {
public:
TestStream(QuicStreamId stream_id, QuicSpdySession* session, StreamType type,
QuicSimpleServerBackend* quic_simple_server_backend)
: QuicSimpleServerStream(stream_id, session, type,
quic_simple_server_backend) {
EXPECT_CALL(*this, WriteOrBufferBody(_, _))
.Times(AnyNumber())
.WillRepeatedly([this](absl::string_view data, bool fin) {
this->QuicSimpleServerStream::WriteOrBufferBody(data, fin);
});
}
~TestStream() override = default;
MOCK_METHOD(void, FireAlarmMock, (), ());
MOCK_METHOD(void, WriteHeadersMock, (bool fin), ());
MOCK_METHOD(void, WriteEarlyHintsHeadersMock, (bool fin), ());
MOCK_METHOD(void, WriteOrBufferBody, (absl::string_view data, bool fin),
(override));
size_t WriteHeaders(
quiche::HttpHeaderBlock header_block, bool fin,
quiche::QuicheReferenceCountedPointer<QuicAckListenerInterface>
      /*ack_listener*/) override {
if (header_block[":status"] == "103") {
WriteEarlyHintsHeadersMock(fin);
} else {
WriteHeadersMock(fin);
}
return 0;
}
void DoSendResponse() { SendResponse(); }
void DoSendErrorResponse() { QuicSimpleServerStream::SendErrorResponse(); }
quiche::HttpHeaderBlock* mutable_headers() { return &request_headers_; }
void set_body(std::string body) { body_ = std::move(body); }
const std::string& body() const { return body_; }
int content_length() const { return content_length_; }
bool send_response_was_called() const { return send_response_was_called_; }
bool send_error_response_was_called() const {
return send_error_response_was_called_;
}
absl::string_view GetHeader(absl::string_view key) const {
auto it = request_headers_.find(key);
QUICHE_DCHECK(it != request_headers_.end());
return it->second;
}
void ReplaceBackend(QuicSimpleServerBackend* backend) {
set_quic_simple_server_backend_for_test(backend);
}
protected:
void SendResponse() override {
send_response_was_called_ = true;
QuicSimpleServerStream::SendResponse();
}
void SendErrorResponse(int resp_code) override {
send_error_response_was_called_ = true;
QuicSimpleServerStream::SendErrorResponse(resp_code);
}
private:
bool send_response_was_called_ = false;
bool send_error_response_was_called_ = false;
};
namespace {
class MockQuicSimpleServerSession : public QuicSimpleServerSession {
public:
const size_t kMaxStreamsForTest = 100;
MockQuicSimpleServerSession(
QuicConnection* connection, MockQuicSessionVisitor* owner,
MockQuicCryptoServerStreamHelper* helper,
QuicCryptoServerConfig* crypto_config,
QuicCompressedCertsCache* compressed_certs_cache,
QuicSimpleServerBackend* quic_simple_server_backend)
: QuicSimpleServerSession(DefaultQuicConfig(), CurrentSupportedVersions(),
connection, owner, helper, crypto_config,
compressed_certs_cache,
quic_simple_server_backend) {
if (VersionHasIetfQuicFrames(connection->transport_version())) {
QuicSessionPeer::SetMaxOpenIncomingUnidirectionalStreams(
this, kMaxStreamsForTest);
QuicSessionPeer::SetMaxOpenIncomingBidirectionalStreams(
this, kMaxStreamsForTest);
} else {
QuicSessionPeer::SetMaxOpenIncomingStreams(this, kMaxStreamsForTest);
QuicSessionPeer::SetMaxOpenOutgoingStreams(this, kMaxStreamsForTest);
}
ON_CALL(*this, WritevData(_, _, _, _, _, _))
.WillByDefault(Invoke(this, &MockQuicSimpleServerSession::ConsumeData));
}
MockQuicSimpleServerSession(const MockQuicSimpleServerSession&) = delete;
MockQuicSimpleServerSession& operator=(const MockQuicSimpleServerSession&) =
delete;
~MockQuicSimpleServerSession() override = default;
MOCK_METHOD(void, OnConnectionClosed,
(const QuicConnectionCloseFrame& frame,
ConnectionCloseSource source),
(override));
MOCK_METHOD(QuicSpdyStream*, CreateIncomingStream, (QuicStreamId id),
(override));
MOCK_METHOD(QuicConsumedData, WritevData,
(QuicStreamId id, size_t write_length, QuicStreamOffset offset,
StreamSendingState state, TransmissionType type,
EncryptionLevel level),
(override));
MOCK_METHOD(void, OnStreamHeaderList,
(QuicStreamId stream_id, bool fin, size_t frame_len,
const QuicHeaderList& header_list),
(override));
MOCK_METHOD(void, OnStreamHeadersPriority,
(QuicStreamId stream_id,
const spdy::SpdyStreamPrecedence& precedence),
(override));
MOCK_METHOD(void, MaybeSendRstStreamFrame,
(QuicStreamId stream_id, QuicResetStreamError error,
QuicStreamOffset bytes_written),
(override));
MOCK_METHOD(void, MaybeSendStopSendingFrame,
(QuicStreamId stream_id, QuicResetStreamError error), (override));
using QuicSession::ActivateStream;
QuicConsumedData ConsumeData(QuicStreamId id, size_t write_length,
QuicStreamOffset offset,
StreamSendingState state,
                               TransmissionType /*type*/,
                               std::optional<EncryptionLevel> /*level*/) {
if (write_length > 0) {
auto buf = std::make_unique<char[]>(write_length);
QuicStream* stream = GetOrCreateStream(id);
QUICHE_DCHECK(stream);
QuicDataWriter writer(write_length, buf.get(), quiche::HOST_BYTE_ORDER);
stream->WriteStreamData(offset, write_length, &writer);
} else {
QUICHE_DCHECK(state != NO_FIN);
}
return QuicConsumedData(write_length, state != NO_FIN);
}
quiche::HttpHeaderBlock original_request_headers_;
};
class QuicSimpleServerStreamTest : public QuicTestWithParam<ParsedQuicVersion> {
public:
QuicSimpleServerStreamTest()
: connection_(new StrictMock<MockQuicConnection>(
&simulator_, simulator_.GetAlarmFactory(), Perspective::IS_SERVER,
SupportedVersions(GetParam()))),
crypto_config_(new QuicCryptoServerConfig(
QuicCryptoServerConfig::TESTING, QuicRandom::GetInstance(),
crypto_test_utils::ProofSourceForTesting(),
KeyExchangeSource::Default())),
compressed_certs_cache_(
QuicCompressedCertsCache::kQuicCompressedCertsCacheSize),
session_(connection_, &session_owner_, &session_helper_,
crypto_config_.get(), &compressed_certs_cache_,
&memory_cache_backend_),
quic_response_(new QuicBackendResponse),
body_("hello world") {
connection_->set_visitor(&session_);
header_list_.OnHeader(":authority", "www.google.com");
header_list_.OnHeader(":path", "/");
header_list_.OnHeader(":method", "POST");
header_list_.OnHeader(":scheme", "https");
header_list_.OnHeader("content-length", "11");
header_list_.OnHeaderBlockEnd(128, 128);
session_.config()->SetInitialStreamFlowControlWindowToSend(
kInitialStreamFlowControlWindowForTest);
session_.config()->SetInitialSessionFlowControlWindowToSend(
kInitialSessionFlowControlWindowForTest);
session_.Initialize();
connection_->SetEncrypter(
quic::ENCRYPTION_FORWARD_SECURE,
std::make_unique<quic::NullEncrypter>(connection_->perspective()));
if (connection_->version().SupportsAntiAmplificationLimit()) {
QuicConnectionPeer::SetAddressValidated(connection_);
}
stream_ = new StrictMock<TestStream>(
GetNthClientInitiatedBidirectionalStreamId(
connection_->transport_version(), 0),
&session_, BIDIRECTIONAL, &memory_cache_backend_);
session_.ActivateStream(absl::WrapUnique(stream_));
QuicConfigPeer::SetReceivedInitialSessionFlowControlWindow(
session_.config(), kMinimumFlowControlSendWindow);
QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesUnidirectional(
session_.config(), kMinimumFlowControlSendWindow);
QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesIncomingBidirectional(
session_.config(), kMinimumFlowControlSendWindow);
QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesOutgoingBidirectional(
session_.config(), kMinimumFlowControlSendWindow);
QuicConfigPeer::SetReceivedMaxUnidirectionalStreams(session_.config(), 10);
session_.OnConfigNegotiated();
simulator_.RunFor(QuicTime::Delta::FromSeconds(1));
}
const std::string& StreamBody() { return stream_->body(); }
std::string StreamHeadersValue(const std::string& key) {
return (*stream_->mutable_headers())[key].as_string();
}
bool UsesHttp3() const {
return VersionUsesHttp3(connection_->transport_version());
}
void ReplaceBackend(std::unique_ptr<QuicSimpleServerBackend> backend) {
replacement_backend_ = std::move(backend);
stream_->ReplaceBackend(replacement_backend_.get());
}
quic::simulator::Simulator simulator_;
quiche::HttpHeaderBlock response_headers_;
MockQuicConnectionHelper helper_;
StrictMock<MockQuicConnection>* connection_;
StrictMock<MockQuicSessionVisitor> session_owner_;
StrictMock<MockQuicCryptoServerStreamHelper> session_helper_;
std::unique_ptr<QuicCryptoServerConfig> crypto_config_;
QuicCompressedCertsCache compressed_certs_cache_;
QuicMemoryCacheBackend memory_cache_backend_;
std::unique_ptr<QuicSimpleServerBackend> replacement_backend_;
StrictMock<MockQuicSimpleServerSession> session_;
StrictMock<TestStream>* stream_;
std::unique_ptr<QuicBackendResponse> quic_response_;
std::string body_;
QuicHeaderList header_list_;
};
INSTANTIATE_TEST_SUITE_P(Tests, QuicSimpleServerStreamTest,
::testing::ValuesIn(AllSupportedVersions()),
::testing::PrintToStringParamName());
TEST_P(QuicSimpleServerStreamTest, TestFraming) {
EXPECT_CALL(session_, WritevData(_, _, _, _, _, _))
.WillRepeatedly(
Invoke(&session_, &MockQuicSimpleServerSession::ConsumeData));
stream_->OnStreamHeaderList(false, kFakeFrameLen, header_list_);
quiche::QuicheBuffer header = HttpEncoder::SerializeDataFrameHeader(
body_.length(), quiche::SimpleBufferAllocator::Get());
std::string data =
UsesHttp3() ? absl::StrCat(header.AsStringView(), body_) : body_;
stream_->OnStreamFrame(
QuicStreamFrame(stream_->id(), false, 0, data));
EXPECT_EQ("11", StreamHeadersValue("content-length"));
EXPECT_EQ("/", StreamHeadersValue(":path"));
EXPECT_EQ("POST", StreamHeadersValue(":method"));
EXPECT_EQ(body_, StreamBody());
}
TEST_P(QuicSimpleServerStreamTest, TestFramingOnePacket) {
EXPECT_CALL(session_, WritevData(_, _, _, _, _, _))
.WillRepeatedly(
Invoke(&session_, &MockQuicSimpleServerSession::ConsumeData));
stream_->OnStreamHeaderList(false, kFakeFrameLen, header_list_);
quiche::QuicheBuffer header = HttpEncoder::SerializeDataFrameHeader(
body_.length(), quiche::SimpleBufferAllocator::Get());
std::string data =
UsesHttp3() ? absl::StrCat(header.AsStringView(), body_) : body_;
stream_->OnStreamFrame(
QuicStreamFrame(stream_->id(), false, 0, data));
EXPECT_EQ("11", StreamHeadersValue("content-length"));
EXPECT_EQ("/", StreamHeadersValue(":path"));
EXPECT_EQ("POST", StreamHeadersValue(":method"));
EXPECT_EQ(body_, StreamBody());
}
TEST_P(QuicSimpleServerStreamTest, SendQuicRstStreamNoErrorInStopReading) {
EXPECT_CALL(session_, WritevData(_, _, _, _, _, _))
.WillRepeatedly(
Invoke(&session_, &MockQuicSimpleServerSession::ConsumeData));
EXPECT_FALSE(stream_->fin_received());
EXPECT_FALSE(stream_->rst_received());
QuicStreamPeer::SetFinSent(stream_);
stream_->CloseWriteSide();
if (session_.version().UsesHttp3()) {
EXPECT_CALL(session_,
MaybeSendStopSendingFrame(_, QuicResetStreamError::FromInternal(
QUIC_STREAM_NO_ERROR)))
.Times(1);
} else {
EXPECT_CALL(
session_,
MaybeSendRstStreamFrame(
_, QuicResetStreamError::FromInternal(QUIC_STREAM_NO_ERROR), _))
.Times(1);
}
stream_->StopReading();
}
TEST_P(QuicSimpleServerStreamTest, TestFramingExtraData) {
InSequence seq;
std::string large_body = "hello world!!!!!!";
EXPECT_CALL(*stream_, WriteHeadersMock(false));
if (UsesHttp3()) {
EXPECT_CALL(session_,
WritevData(_, kDataFrameHeaderLength, _, NO_FIN, _, _));
}
EXPECT_CALL(session_, WritevData(_, kErrorLength, _, FIN, _, _));
stream_->OnStreamHeaderList(false, kFakeFrameLen, header_list_);
quiche::QuicheBuffer header = HttpEncoder::SerializeDataFrameHeader(
body_.length(), quiche::SimpleBufferAllocator::Get());
std::string data =
UsesHttp3() ? absl::StrCat(header.AsStringView(), body_) : body_;
stream_->OnStreamFrame(
QuicStreamFrame(stream_->id(), false, 0, data));
header = HttpEncoder::SerializeDataFrameHeader(
large_body.length(), quiche::SimpleBufferAllocator::Get());
std::string data2 = UsesHttp3()
? absl::StrCat(header.AsStringView(), large_body)
: large_body;
stream_->OnStreamFrame(
QuicStreamFrame(stream_->id(), true, data.size(), data2));
EXPECT_EQ("11", StreamHeadersValue("content-length"));
EXPECT_EQ("/", StreamHeadersValue(":path"));
EXPECT_EQ("POST", StreamHeadersValue(":method"));
}
TEST_P(QuicSimpleServerStreamTest, SendResponseWithIllegalResponseStatus) {
quiche::HttpHeaderBlock* request_headers = stream_->mutable_headers();
(*request_headers)[":path"] = "/bar";
(*request_headers)[":authority"] = "www.google.com";
(*request_headers)[":method"] = "GET";
response_headers_[":status"] = "200 OK";
response_headers_["content-length"] = "5";
std::string body = "Yummm";
quiche::QuicheBuffer header = HttpEncoder::SerializeDataFrameHeader(
body.length(), quiche::SimpleBufferAllocator::Get());
memory_cache_backend_.AddResponse("www.google.com", "/bar",
std::move(response_headers_), body);
QuicStreamPeer::SetFinReceived(stream_);
InSequence s;
EXPECT_CALL(*stream_, WriteHeadersMock(false));
if (UsesHttp3()) {
EXPECT_CALL(session_, WritevData(_, header.size(), _, NO_FIN, _, _));
}
EXPECT_CALL(session_, WritevData(_, kErrorLength, _, FIN, _, _));
stream_->DoSendResponse();
EXPECT_FALSE(QuicStreamPeer::read_side_closed(stream_));
EXPECT_TRUE(stream_->write_side_closed());
}
TEST_P(QuicSimpleServerStreamTest, SendResponseWithIllegalResponseStatus2) {
quiche::HttpHeaderBlock* request_headers = stream_->mutable_headers();
(*request_headers)[":path"] = "/bar";
(*request_headers)[":authority"] = "www.google.com";
(*request_headers)[":method"] = "GET";
response_headers_[":status"] = "+200";
response_headers_["content-length"] = "5";
std::string body = "Yummm";
quiche::QuicheBuffer header = HttpEncoder::SerializeDataFrameHeader(
body.length(), quiche::SimpleBufferAllocator::Get());
memory_cache_backend_.AddResponse("www.google.com", "/bar",
std::move(response_headers_), body);
QuicStreamPeer::SetFinReceived(stream_);
InSequence s;
EXPECT_CALL(*stream_, WriteHeadersMock(false));
if (UsesHttp3()) {
EXPECT_CALL(session_, WritevData(_, header.size(), _, NO_FIN, _, _));
}
EXPECT_CALL(session_, WritevData(_, kErrorLength, _, FIN, _, _));
stream_->DoSendResponse();
EXPECT_FALSE(QuicStreamPeer::read_side_closed(stream_));
EXPECT_TRUE(stream_->write_side_closed());
}
TEST_P(QuicSimpleServerStreamTest, SendResponseWithValidHeaders) {
quiche::HttpHeaderBlock* request_headers = stream_->mutable_headers();
(*request_headers)[":path"] = "/bar";
(*request_headers)[":authority"] = "www.google.com";
(*request_headers)[":method"] = "GET";
response_headers_[":status"] = "200";
response_headers_["content-length"] = "5";
std::string body = "Yummm";
quiche::QuicheBuffer header = HttpEncoder::SerializeDataFrameHeader(
body.length(), quiche::SimpleBufferAllocator::Get());
memory_cache_backend_.AddResponse("www.google.com", "/bar",
std::move(response_headers_), body);
QuicStreamPeer::SetFinReceived(stream_);
InSequence s;
EXPECT_CALL(*stream_, WriteHeadersMock(false));
if (UsesHttp3()) {
EXPECT_CALL(session_, WritevData(_, header.size(), _, NO_FIN, _, _));
}
EXPECT_CALL(session_, WritevData(_, body.length(), _, FIN, _, _));
stream_->DoSendResponse();
EXPECT_FALSE(QuicStreamPeer::read_side_closed(stream_));
EXPECT_TRUE(stream_->write_side_closed());
}
TEST_P(QuicSimpleServerStreamTest, SendResponseWithEarlyHints) {
std::string host = "www.google.com";
std::string request_path = "/foo";
std::string body = "Yummm";
quiche::HttpHeaderBlock* request_headers = stream_->mutable_headers();
(*request_headers)[":path"] = request_path;
(*request_headers)[":authority"] = host;
(*request_headers)[":method"] = "GET";
quiche::QuicheBuffer header = HttpEncoder::SerializeDataFrameHeader(
body.length(), quiche::SimpleBufferAllocator::Get());
std::vector<quiche::HttpHeaderBlock> early_hints;
const size_t kNumEarlyHintsResponses = 2;
for (size_t i = 0; i < kNumEarlyHintsResponses; ++i) {
quiche::HttpHeaderBlock hints;
hints["link"] = "</image.png>; rel=preload; as=image";
early_hints.push_back(std::move(hints));
}
response_headers_[":status"] = "200";
response_headers_["content-length"] = "5";
memory_cache_backend_.AddResponseWithEarlyHints(
host, request_path, std::move(response_headers_), body, early_hints);
QuicStreamPeer::SetFinReceived(stream_);
InSequence s;
for (size_t i = 0; i < kNumEarlyHintsResponses; ++i) {
EXPECT_CALL(*stream_, WriteEarlyHintsHeadersMock(false));
}
EXPECT_CALL(*stream_, WriteHeadersMock(false));
if (UsesHttp3()) {
EXPECT_CALL(session_, WritevData(_, header.size(), _, NO_FIN, _, _));
}
EXPECT_CALL(session_, WritevData(_, body.length(), _, FIN, _, _));
stream_->DoSendResponse();
EXPECT_FALSE(QuicStreamPeer::read_side_closed(stream_));
EXPECT_TRUE(stream_->write_side_closed());
}
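// Alarm delegate used by the delayed-response test below; firing the alarm
// simply records the event on the TestStream mock.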
class AlarmTestDelegate : public QuicAlarm::DelegateWithoutContext {
public:
AlarmTestDelegate(TestStream* stream) : stream_(stream) {}
void OnAlarm() override { stream_->FireAlarmMock(); }
private:
TestStream* stream_;
};
TEST_P(QuicSimpleServerStreamTest, SendResponseWithDelay) {
quiche::HttpHeaderBlock* request_headers = stream_->mutable_headers();
std::string host = "www.google.com";
std::string path = "/bar";
(*request_headers)[":path"] = path;
(*request_headers)[":authority"] = host;
(*request_headers)[":method"] = "GET";
response_headers_[":status"] = "200";
response_headers_["content-length"] = "5";
std::string body = "Yummm";
QuicTime::Delta delay = QuicTime::Delta::FromMilliseconds(3000);
quiche::QuicheBuffer header = HttpEncoder::SerializeDataFrameHeader(
body.length(), quiche::SimpleBufferAllocator::Get());
memory_cache_backend_.AddResponse(host, path, std::move(response_headers_),
body);
auto did_delay_succeed =
memory_cache_backend_.SetResponseDelay(host, path, delay);
EXPECT_TRUE(did_delay_succeed);
auto did_invalid_delay_succeed =
memory_cache_backend_.SetResponseDelay(host, "nonsense", delay);
EXPECT_FALSE(did_invalid_delay_succeed);
std::unique_ptr<QuicAlarm> alarm(connection_->alarm_factory()->CreateAlarm(
new AlarmTestDelegate(stream_)));
alarm->Set(connection_->clock()->Now() + delay);
QuicStreamPeer::SetFinReceived(stream_);
InSequence s;
EXPECT_CALL(*stream_, FireAlarmMock());
EXPECT_CALL(*stream_, WriteHeadersMock(false));
if (UsesHttp3()) {
EXPECT_CALL(session_, WritevData(_, header.size(), _, NO_FIN, _, _));
}
EXPECT_CALL(session_, WritevData(_, body.length(), _, FIN, _, _));
stream_->DoSendResponse();
simulator_.RunFor(delay);
EXPECT_FALSE(QuicStreamPeer::read_side_closed(stream_));
EXPECT_TRUE(stream_->write_side_closed());
}
TEST_P(QuicSimpleServerStreamTest, TestSendErrorResponse) {
QuicStreamPeer::SetFinReceived(stream_);
InSequence s;
EXPECT_CALL(*stream_, WriteHeadersMock(false));
if (UsesHttp3()) {
EXPECT_CALL(session_,
WritevData(_, kDataFrameHeaderLength, _, NO_FIN, _, _));
}
EXPECT_CALL(session_, WritevData(_, kErrorLength, _, FIN, _, _));
stream_->DoSendErrorResponse();
EXPECT_FALSE(QuicStreamPeer::read_side_closed(stream_));
EXPECT_TRUE(stream_->write_side_closed());
}
TEST_P(QuicSimpleServerStreamTest, InvalidMultipleContentLength) {
quiche::HttpHeaderBlock request_headers;
header_list_.OnHeader("content-length", absl::string_view("11\00012", 5));
if (session_.version().UsesHttp3()) {
EXPECT_CALL(session_,
MaybeSendStopSendingFrame(_, QuicResetStreamError::FromInternal(
QUIC_STREAM_NO_ERROR)));
}
EXPECT_CALL(*stream_, WriteHeadersMock(false));
EXPECT_CALL(session_, WritevData(_, _, _, _, _, _))
.WillRepeatedly(
Invoke(&session_, &MockQuicSimpleServerSession::ConsumeData));
stream_->OnStreamHeaderList(true, kFakeFrameLen, header_list_);
EXPECT_TRUE(QuicStreamPeer::read_side_closed(stream_));
EXPECT_TRUE(stream_->reading_stopped());
EXPECT_TRUE(stream_->write_side_closed());
}
TEST_P(QuicSimpleServerStreamTest, InvalidLeadingNullContentLength) {
quiche::HttpHeaderBlock request_headers;
header_list_.OnHeader("content-length", absl::string_view("\00012", 3));
if (session_.version().UsesHttp3()) {
EXPECT_CALL(session_,
MaybeSendStopSendingFrame(_, QuicResetStreamError::FromInternal(
QUIC_STREAM_NO_ERROR)));
}
EXPECT_CALL(*stream_, WriteHeadersMock(false));
EXPECT_CALL(session_, WritevData(_, _, _, _, _, _))
.WillRepeatedly(
Invoke(&session_, &MockQuicSimpleServerSession::ConsumeData));
stream_->OnStreamHeaderList(true, kFakeFrameLen, header_list_);
EXPECT_TRUE(QuicStreamPeer::read_side_closed(stream_));
EXPECT_TRUE(stream_->reading_stopped());
EXPECT_TRUE(stream_->write_side_closed());
}
TEST_P(QuicSimpleServerStreamTest, InvalidMultipleContentLengthII) {
quiche::HttpHeaderBlock request_headers;
header_list_.OnHeader("content-length", absl::string_view("11\00011", 5));
if (session_.version().UsesHttp3()) {
EXPECT_CALL(session_,
MaybeSendStopSendingFrame(_, QuicResetStreamError::FromInternal(
QUIC_STREAM_NO_ERROR)));
EXPECT_CALL(*stream_, WriteHeadersMock(false));
EXPECT_CALL(session_, WritevData(_, _, _, _, _, _))
.WillRepeatedly(
Invoke(&session_, &MockQuicSimpleServerSession::ConsumeData));
}
stream_->OnStreamHeaderList(false, kFakeFrameLen, header_list_);
if (session_.version().UsesHttp3()) {
EXPECT_TRUE(QuicStreamPeer::read_side_closed(stream_));
EXPECT_TRUE(stream_->reading_stopped());
EXPECT_TRUE(stream_->write_side_closed());
} else {
EXPECT_EQ(11, stream_->content_length());
EXPECT_FALSE(QuicStreamPeer::read_side_closed(stream_));
EXPECT_FALSE(stream_->reading_stopped());
EXPECT_FALSE(stream_->write_side_closed());
}
}
TEST_P(QuicSimpleServerStreamTest,
DoNotSendQuicRstStreamNoErrorWithRstReceived) {
EXPECT_FALSE(stream_->reading_stopped());
if (VersionUsesHttp3(connection_->transport_version())) {
auto* qpack_decoder_stream =
QuicSpdySessionPeer::GetQpackDecoderSendStream(&session_);
EXPECT_CALL(session_, WritevData(qpack_decoder_stream->id(), _, _, _, _, _))
.Times(AnyNumber());
}
EXPECT_CALL(
session_,
MaybeSendRstStreamFrame(
_,
session_.version().UsesHttp3()
? QuicResetStreamError::FromInternal(QUIC_STREAM_CANCELLED)
: QuicResetStreamError::FromInternal(QUIC_RST_ACKNOWLEDGEMENT),
_))
.Times(1);
QuicRstStreamFrame rst_frame(kInvalidControlFrameId, stream_->id(),
QUIC_STREAM_CANCELLED, 1234);
stream_->OnStreamReset(rst_frame);
if (VersionHasIetfQuicFrames(connection_->transport_version())) {
EXPECT_CALL(session_owner_, OnStopSendingReceived(_));
QuicStopSendingFrame stop_sending(kInvalidControlFrameId, stream_->id(),
QUIC_STREAM_CANCELLED);
session_.OnStopSendingFrame(stop_sending);
}
EXPECT_TRUE(stream_->reading_stopped());
EXPECT_TRUE(stream_->write_side_closed());
}
TEST_P(QuicSimpleServerStreamTest, InvalidHeadersWithFin) {
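  // Serialized header block carrying :host, :method GET, :path /foo, :scheme
  // and :version HTTP/1.1, except that the 'i' in ":version" is replaced by
  // the invalid byte 0x96, so header decoding must fail.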
char arr[] = {
0x3a, 0x68, 0x6f, 0x73,
0x74, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x07, 0x3a, 0x6d, 0x65,
0x74, 0x68, 0x6f, 0x64,
0x00, 0x00, 0x00, 0x03,
0x47, 0x45, 0x54, 0x00,
0x00, 0x00, 0x05, 0x3a,
0x70, 0x61, 0x74, 0x68,
0x00, 0x00, 0x00, 0x04,
0x2f, 0x66, 0x6f, 0x6f,
0x00, 0x00, 0x00, 0x07,
0x3a, 0x73, 0x63, 0x68,
0x65, 0x6d, 0x65, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x08, 0x3a,
0x76, 0x65, 0x72, 0x73,
'\x96', 0x6f, 0x6e, 0x00,
0x00, 0x00, 0x08, 0x48,
0x54, 0x54, 0x50, 0x2f,
0x31, 0x2e, 0x31,
};
absl::string_view data(arr, ABSL_ARRAYSIZE(arr));
QuicStreamFrame frame(stream_->id(), true, 0, data);
stream_->OnStreamFrame(frame);
}
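// Backend stub whose CONNECT hooks are gmock methods; the ACTION_P helpers
// below drive the RequestHandler that is passed into those hooks.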
class TestQuicSimpleServerBackend : public QuicSimpleServerBackend {
public:
TestQuicSimpleServerBackend() = default;
~TestQuicSimpleServerBackend() override = default;
bool InitializeBackend(const std::string& ) override {
return true;
}
bool IsBackendInitialized() const override { return true; }
MOCK_METHOD(void, FetchResponseFromBackend,
(const quiche::HttpHeaderBlock&, const std::string&,
RequestHandler*),
(override));
MOCK_METHOD(void, HandleConnectHeaders,
(const quiche::HttpHeaderBlock&, RequestHandler*), (override));
MOCK_METHOD(void, HandleConnectData,
(absl::string_view, bool, RequestHandler*), (override));
void CloseBackendResponseStream(
RequestHandler* ) override {}
};
ACTION_P(SendHeadersResponse, response_ptr) {
arg1->OnResponseBackendComplete(response_ptr);
}
ACTION_P(SendStreamData, data, close_stream) {
arg2->SendStreamData(data, close_stream);
}
ACTION_P(TerminateStream, error) { arg1->TerminateStreamWithError(error); }
TEST_P(QuicSimpleServerStreamTest, ConnectSendsIntermediateResponses) {
auto test_backend = std::make_unique<TestQuicSimpleServerBackend>();
TestQuicSimpleServerBackend* test_backend_ptr = test_backend.get();
ReplaceBackend(std::move(test_backend));
constexpr absl::string_view kRequestBody = "\x11\x11";
quiche::HttpHeaderBlock response_headers;
response_headers[":status"] = "200";
QuicBackendResponse headers_response;
headers_response.set_headers(response_headers.Clone());
headers_response.set_response_type(QuicBackendResponse::INCOMPLETE_RESPONSE);
constexpr absl::string_view kBody1 = "\x22\x22";
constexpr absl::string_view kBody2 = "\x33\x33";
InSequence s;
EXPECT_CALL(*test_backend_ptr, HandleConnectHeaders(_, _))
.WillOnce(SendHeadersResponse(&headers_response));
EXPECT_CALL(*stream_, WriteHeadersMock(false));
EXPECT_CALL(*test_backend_ptr, HandleConnectData(kRequestBody, false, _))
.WillOnce(SendStreamData(kBody1,
false));
EXPECT_CALL(*stream_, WriteOrBufferBody(kBody1, false));
EXPECT_CALL(*test_backend_ptr, HandleConnectData(kRequestBody, true, _))
.WillOnce(SendStreamData(kBody2,
true));
EXPECT_CALL(*stream_, WriteOrBufferBody(kBody2, true));
QuicHeaderList header_list;
header_list.OnHeader(":authority", "www.google.com:4433");
header_list.OnHeader(":method", "CONNECT");
header_list.OnHeaderBlockEnd(128, 128);
stream_->OnStreamHeaderList(false, kFakeFrameLen, header_list);
quiche::QuicheBuffer header = HttpEncoder::SerializeDataFrameHeader(
kRequestBody.length(), quiche::SimpleBufferAllocator::Get());
std::string data = UsesHttp3()
? absl::StrCat(header.AsStringView(), kRequestBody)
: std::string(kRequestBody);
stream_->OnStreamFrame(
QuicStreamFrame(stream_->id(), false, 0, data));
stream_->OnStreamFrame(
QuicStreamFrame(stream_->id(), true, data.length(), data));
EXPECT_FALSE(stream_->send_response_was_called());
EXPECT_FALSE(stream_->send_error_response_was_called());
}
TEST_P(QuicSimpleServerStreamTest, ErrorOnUnhandledConnect) {
EXPECT_CALL(*stream_, WriteHeadersMock(true));
EXPECT_CALL(session_, MaybeSendRstStreamFrame(stream_->id(), _, _));
QuicHeaderList header_list;
header_list.OnHeader(":authority", "www.google.com:4433");
header_list.OnHeader(":method", "CONNECT");
header_list.OnHeaderBlockEnd(128, 128);
constexpr absl::string_view kRequestBody = "\x11\x11";
stream_->OnStreamHeaderList(false, kFakeFrameLen, header_list);
quiche::QuicheBuffer header = HttpEncoder::SerializeDataFrameHeader(
kRequestBody.length(), quiche::SimpleBufferAllocator::Get());
std::string data = UsesHttp3()
? absl::StrCat(header.AsStringView(), kRequestBody)
: std::string(kRequestBody);
stream_->OnStreamFrame(
QuicStreamFrame(stream_->id(), true, 0, data));
EXPECT_FALSE(stream_->send_response_was_called());
EXPECT_FALSE(stream_->send_error_response_was_called());
}
TEST_P(QuicSimpleServerStreamTest, ConnectWithInvalidHeader) {
EXPECT_CALL(session_, WritevData(_, _, _, _, _, _))
.WillRepeatedly(
Invoke(&session_, &MockQuicSimpleServerSession::ConsumeData));
QuicHeaderList header_list;
header_list.OnHeader(":authority", "www.google.com:4433");
header_list.OnHeader(":method", "CONNECT");
header_list.OnHeader("InVaLiD-HeAdEr", "Well that's just wrong!");
header_list.OnHeaderBlockEnd(128, 128);
if (UsesHttp3()) {
EXPECT_CALL(session_,
MaybeSendStopSendingFrame(_, QuicResetStreamError::FromInternal(
QUIC_STREAM_NO_ERROR)))
.Times(1);
} else {
EXPECT_CALL(
session_,
MaybeSendRstStreamFrame(
_, QuicResetStreamError::FromInternal(QUIC_STREAM_NO_ERROR), _))
.Times(1);
}
EXPECT_CALL(*stream_, WriteHeadersMock(false));
stream_->OnStreamHeaderList(false, kFakeFrameLen, header_list);
EXPECT_FALSE(stream_->send_response_was_called());
EXPECT_TRUE(stream_->send_error_response_was_called());
}
TEST_P(QuicSimpleServerStreamTest, BackendCanTerminateStream) {
auto test_backend = std::make_unique<TestQuicSimpleServerBackend>();
TestQuicSimpleServerBackend* test_backend_ptr = test_backend.get();
ReplaceBackend(std::move(test_backend));
EXPECT_CALL(session_, WritevData(_, _, _, _, _, _))
.WillRepeatedly(
Invoke(&session_, &MockQuicSimpleServerSession::ConsumeData));
QuicResetStreamError expected_error =
QuicResetStreamError::FromInternal(QUIC_STREAM_CONNECT_ERROR);
EXPECT_CALL(*test_backend_ptr, HandleConnectHeaders(_, _))
.WillOnce(TerminateStream(expected_error));
EXPECT_CALL(session_,
MaybeSendRstStreamFrame(stream_->id(), expected_error, _));
QuicHeaderList header_list;
header_list.OnHeader(":authority", "www.google.com:4433");
header_list.OnHeader(":method", "CONNECT");
header_list.OnHeaderBlockEnd(128, 128);
stream_->OnStreamHeaderList(false, kFakeFrameLen, header_list);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/tools/quic_simple_server_stream.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/tools/quic_simple_server_stream_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
38bacd4d-da84-44e2-a7b8-67bd2578d95b | cpp | tensorflow/tensorflow | file_system | third_party/xla/third_party/tsl/tsl/platform/file_system.cc | tensorflow/core/platform/file_system_test.cc | #include "tsl/platform/file_system.h"
#include <sys/stat.h>
#include <algorithm>
#include <deque>
#include <string>
#include <utility>
#include <vector>
#include "tsl/platform/status.h"
#if defined(PLATFORM_POSIX) || defined(IS_MOBILE_PLATFORM) || \
defined(PLATFORM_GOOGLE)
#include <fnmatch.h>
#else
#include "tsl/platform/regexp.h"
#endif
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/platform.h"
#include "tsl/platform/scanner.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/strcat.h"
namespace tsl {
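// Matches `filename` against a glob `pattern`, via fnmatch() on POSIX-like
// platforms and via a glob-to-RE2 translation elsewhere.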
bool FileSystem::Match(const string& filename, const string& pattern) {
#if defined(PLATFORM_POSIX) || defined(IS_MOBILE_PLATFORM) || \
defined(PLATFORM_GOOGLE)
return fnmatch(pattern.c_str(), filename.c_str(), FNM_PATHNAME) == 0;
#else
string regexp(pattern);
regexp = str_util::StringReplace(regexp, "*", "[^/]*", true);
regexp = str_util::StringReplace(regexp, "?", ".", true);
regexp = str_util::StringReplace(regexp, "(", "\\(", true);
regexp = str_util::StringReplace(regexp, ")", "\\)", true);
return RE2::FullMatch(filename, regexp);
#endif
}
string FileSystem::TranslateName(const string& name) const {
if (name.empty()) return name;
absl::string_view scheme, host, path;
this->ParseURI(name, &scheme, &host, &path);
if (path.empty()) return "/";
return this->CleanPath(path);
}
absl::Status FileSystem::IsDirectory(const string& name,
TransactionToken* token) {
TF_RETURN_IF_ERROR(FileExists(name));
FileStatistics stat;
TF_RETURN_IF_ERROR(Stat(name, &stat));
if (stat.is_directory) {
return absl::OkStatus();
}
return absl::Status(absl::StatusCode::kFailedPrecondition, "Not a directory");
}
absl::Status FileSystem::HasAtomicMove(const string& path,
bool* has_atomic_move) {
*has_atomic_move = true;
return absl::OkStatus();
}
absl::Status FileSystem::CanCreateTempFile(const std::string& fname,
bool* can_create_temp_file) {
*can_create_temp_file = true;
return absl::OkStatus();
}
void FileSystem::FlushCaches(TransactionToken* token) {}
bool FileSystem::FilesExist(const std::vector<string>& files,
TransactionToken* token,
std::vector<absl::Status>* status) {
bool result = true;
for (const auto& file : files) {
absl::Status s = FileExists(file);
result &= s.ok();
if (status != nullptr) {
status->push_back(s);
} else if (!result) {
return false;
}
}
return result;
}
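// Breadth-first traversal that deletes files as they are discovered, then
// deletes the collected directories in reverse (deepest-first) order.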
absl::Status FileSystem::DeleteRecursively(const string& dirname,
TransactionToken* token,
int64_t* undeleted_files,
int64_t* undeleted_dirs) {
CHECK_NOTNULL(undeleted_files);
CHECK_NOTNULL(undeleted_dirs);
*undeleted_files = 0;
*undeleted_dirs = 0;
absl::Status exists_status = FileExists(dirname);
if (!exists_status.ok()) {
(*undeleted_dirs)++;
return exists_status;
}
if (!IsDirectory(dirname).ok()) {
absl::Status delete_root_status = DeleteFile(dirname);
if (!delete_root_status.ok()) (*undeleted_files)++;
return delete_root_status;
}
std::deque<string> dir_q;
std::vector<string> dir_list;
dir_q.push_back(dirname);
absl::Status ret;
while (!dir_q.empty()) {
string dir = dir_q.front();
dir_q.pop_front();
dir_list.push_back(dir);
std::vector<string> children;
absl::Status s = GetChildren(dir, &children);
ret.Update(s);
if (!s.ok()) {
(*undeleted_dirs)++;
continue;
}
for (const string& child : children) {
const string child_path = this->JoinPath(dir, child);
if (IsDirectory(child_path).ok()) {
dir_q.push_back(child_path);
} else {
absl::Status del_status = DeleteFile(child_path);
ret.Update(del_status);
if (!del_status.ok()) {
(*undeleted_files)++;
}
}
}
}
std::reverse(dir_list.begin(), dir_list.end());
for (const string& dir : dir_list) {
absl::Status s = DeleteDir(dir);
ret.Update(s);
if (!s.ok()) {
(*undeleted_dirs)++;
}
}
return ret;
}
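// Walks up from `dirname` until an existing directory is found, then creates
// the missing components top-down, tolerating kAlreadyExists races.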
absl::Status FileSystem::RecursivelyCreateDir(const string& dirname,
TransactionToken* token) {
absl::string_view scheme, host, remaining_dir;
this->ParseURI(dirname, &scheme, &host, &remaining_dir);
std::vector<absl::string_view> sub_dirs;
while (!remaining_dir.empty()) {
std::string current_entry = this->CreateURI(scheme, host, remaining_dir);
absl::Status exists_status = FileExists(current_entry);
if (exists_status.ok()) {
absl::Status directory_status = IsDirectory(current_entry);
if (directory_status.ok()) {
break;
} else if (directory_status.code() == absl::StatusCode::kUnimplemented) {
return directory_status;
} else {
return errors::FailedPrecondition(remaining_dir, " is not a directory");
}
}
if (exists_status.code() != error::Code::NOT_FOUND) {
return exists_status;
}
if (!absl::EndsWith(remaining_dir, "/")) {
sub_dirs.push_back(this->Basename(remaining_dir));
}
remaining_dir = this->Dirname(remaining_dir);
}
std::reverse(sub_dirs.begin(), sub_dirs.end());
string built_path(remaining_dir);
for (const absl::string_view sub_dir : sub_dirs) {
built_path = this->JoinPath(built_path, sub_dir);
absl::Status status = CreateDir(this->CreateURI(scheme, host, built_path));
if (!status.ok() && status.code() != absl::StatusCode::kAlreadyExists) {
return status;
}
}
return absl::OkStatus();
}
absl::Status FileSystem::CopyFile(const string& src, const string& target,
TransactionToken* token) {
return FileSystemCopyFile(this, src, this, target);
}
char FileSystem::Separator() const { return '/'; }
string FileSystem::JoinPathImpl(
std::initializer_list<absl::string_view> paths) {
string result;
for (absl::string_view path : paths) {
if (path.empty()) continue;
if (result.empty()) {
result = string(path);
continue;
}
if (result[result.size() - 1] == '/') {
if (this->IsAbsolutePath(path)) {
strings::StrAppend(&result, path.substr(1));
} else {
strings::StrAppend(&result, path);
}
} else {
if (this->IsAbsolutePath(path)) {
strings::StrAppend(&result, path);
} else {
strings::StrAppend(&result, "/", path);
}
}
}
return result;
}
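// Splits a URI into (directory, basename) around the final path separator;
// the scheme and host stay attached to the directory half.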
std::pair<absl::string_view, absl::string_view> FileSystem::SplitPath(
absl::string_view uri) const {
absl::string_view scheme, host, path;
ParseURI(uri, &scheme, &host, &path);
if (path.empty()) {
return std::make_pair(absl::string_view(), absl::string_view());
}
size_t pos = path.rfind(this->Separator());
#ifdef PLATFORM_WINDOWS
size_t pos2 = path.rfind('/');
if (pos == string::npos) {
pos = pos2;
} else {
if (pos2 != string::npos) {
pos = pos > pos2 ? pos : pos2;
}
}
#endif
if (pos == absl::string_view::npos) {
if (host.empty()) {
return std::make_pair(absl::string_view(), path);
}
return std::make_pair(
absl::string_view(uri.data(), host.end() - uri.begin()), path);
}
if (pos == 0) {
return std::make_pair(
absl::string_view(uri.data(), path.begin() + 1 - uri.begin()),
absl::string_view(path.data() + 1, path.size() - 1));
}
return std::make_pair(
absl::string_view(uri.data(), path.begin() + pos - uri.begin()),
absl::string_view(path.data() + pos + 1, path.size() - (pos + 1)));
}
bool FileSystem::IsAbsolutePath(absl::string_view path) const {
return !path.empty() && path[0] == '/';
}
absl::string_view FileSystem::Dirname(absl::string_view path) const {
return this->SplitPath(path).first;
}
absl::string_view FileSystem::Basename(absl::string_view path) const {
return this->SplitPath(path).second;
}
absl::string_view FileSystem::Extension(absl::string_view path) const {
absl::string_view basename = this->Basename(path);
size_t pos = basename.rfind('.');
if (pos == absl::string_view::npos) {
return absl::string_view(path.data() + path.size(), 0);
} else {
return absl::string_view(path.data() + pos + 1, path.size() - (pos + 1));
}
}
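// Canonicalizes a path in place: collapses duplicate slashes and resolves
// "." and ".." components, keeping leading ".." for relative paths.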
string FileSystem::CleanPath(absl::string_view unclean_path) const {
string path(unclean_path);
const char* src = path.c_str();
string::iterator dst = path.begin();
const bool is_absolute_path = *src == '/';
if (is_absolute_path) {
*dst++ = *src++;
while (*src == '/') ++src;
}
string::const_iterator backtrack_limit = dst;
while (*src) {
bool parsed = false;
if (src[0] == '.') {
if (src[1] == '/' || !src[1]) {
if (*++src) {
++src;
}
parsed = true;
} else if (src[1] == '.' && (src[2] == '/' || !src[2])) {
src += 2;
if (dst != backtrack_limit) {
for (--dst; dst != backtrack_limit && dst[-1] != '/'; --dst) {
}
} else if (!is_absolute_path) {
src -= 2;
*dst++ = *src++;
*dst++ = *src++;
if (*src) {
*dst++ = *src;
}
backtrack_limit = dst;
}
if (*src) {
++src;
}
parsed = true;
}
}
if (!parsed) {
while (*src && *src != '/') {
*dst++ = *src++;
}
if (*src) {
*dst++ = *src++;
}
}
while (*src == '/') {
++src;
}
}
string::difference_type path_length = dst - path.begin();
if (path_length != 0) {
if (path_length > 1 && path[path_length - 1] == '/') {
--path_length;
}
path.resize(path_length);
} else {
path.assign(1, '.');
}
return path;
}
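// Parses "scheme://host/path"; if no scheme prefix is present, the whole
// input is treated as a path with empty scheme and host.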
void FileSystem::ParseURI(absl::string_view remaining,
absl::string_view* scheme, absl::string_view* host,
absl::string_view* path) const {
if (!strings::Scanner(remaining)
.One(strings::Scanner::LETTER)
.Many(strings::Scanner::LETTER_DIGIT_DOT)
.StopCapture()
.OneLiteral(":
.GetResult(&remaining, scheme)) {
*scheme = absl::string_view();
*host = absl::string_view();
*path = remaining;
return;
}
if (!strings::Scanner(remaining).ScanUntil('/').GetResult(&remaining, host)) {
*host = remaining;
*path = absl::string_view();
return;
}
*path = remaining;
}
string FileSystem::CreateURI(absl::string_view scheme, absl::string_view host,
absl::string_view path) const {
if (scheme.empty()) {
return string(path);
}
return strings::StrCat(scheme, ":
}
std::string FileSystem::DecodeTransaction(const TransactionToken* token) {
if (token) {
std::stringstream oss;
oss << "Token= " << token->token << ", Owner=" << token->owner;
return oss.str();
}
return "No Transaction";
}
} | #include "tensorflow/core/platform/file_system.h"
#include <sys/stat.h>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/null_file_system.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
static const char* const kPrefix = "ipfs:
class InterPlanetaryFileSystem : public NullFileSystem {
public:
TF_USE_FILESYSTEM_METHODS_WITH_NO_TRANSACTION_SUPPORT;
Status FileExists(const string& fname, TransactionToken* token) override {
string parsed_path;
ParsePath(fname, &parsed_path);
if (BodyExists(parsed_path)) {
return absl::OkStatus();
}
return Status(absl::StatusCode::kNotFound, "File does not exist");
}
Status CreateDir(const string& dirname, TransactionToken* token) override {
string parsed_path;
ParsePath(dirname, &parsed_path);
if (celestial_bodies_.find(parsed_path) != celestial_bodies_.end()) {
return Status(absl::StatusCode::kAlreadyExists,
"dirname already exists.");
}
std::vector<string> split_path = str_util::Split(parsed_path, '/');
if (split_path.size() > 3) {
return Status(absl::StatusCode::kInvalidArgument, "Bad dirname");
}
if (split_path.empty()) {
return absl::OkStatus();
}
if (split_path.size() == 1) {
celestial_bodies_[""].insert(parsed_path);
celestial_bodies_.insert(
std::pair<string, std::set<string>>(parsed_path, {}));
return absl::OkStatus();
}
if (split_path.size() == 2) {
if (!BodyExists(split_path[0])) {
return Status(absl::StatusCode::kFailedPrecondition,
"Base dir not created");
}
celestial_bodies_[split_path[0]].insert(split_path[1]);
celestial_bodies_.insert(
std::pair<string, std::set<string>>(parsed_path, {}));
return absl::OkStatus();
}
if (split_path.size() == 3) {
const string& parent_path = this->JoinPath(split_path[0], split_path[1]);
if (!BodyExists(parent_path)) {
return Status(absl::StatusCode::kFailedPrecondition,
"Base dir not created");
}
celestial_bodies_[parent_path].insert(split_path[2]);
celestial_bodies_.insert(
std::pair<string, std::set<string>>(parsed_path, {}));
return absl::OkStatus();
}
return Status(absl::StatusCode::kFailedPrecondition, "Failed to create");
}
Status IsDirectory(const string& dirname, TransactionToken* token) override {
string parsed_path;
ParsePath(dirname, &parsed_path);
if (parsed_path == "evil_directory") {
LOG(FATAL) << "evil_directory cannot be accessed";
}
std::vector<string> split_path = str_util::Split(parsed_path, '/');
if (split_path.size() > 2) {
return Status(absl::StatusCode::kFailedPrecondition, "Not a dir");
}
if (celestial_bodies_.find(parsed_path) != celestial_bodies_.end()) {
return absl::OkStatus();
}
return Status(absl::StatusCode::kFailedPrecondition, "Not a dir");
}
Status GetChildren(const string& dir, TransactionToken* token,
std::vector<string>* result) override {
TF_RETURN_IF_ERROR(IsDirectory(dir, nullptr));
string parsed_path;
ParsePath(dir, &parsed_path);
result->insert(result->begin(), celestial_bodies_[parsed_path].begin(),
celestial_bodies_[parsed_path].end());
return absl::OkStatus();
}
private:
bool BodyExists(const string& name) {
return celestial_bodies_.find(name) != celestial_bodies_.end();
}
void ParsePath(const string& name, string* parsed_path) {
StringPiece scheme, host, path;
this->ParseURI(name, &scheme, &host, &path);
ASSERT_EQ(scheme, "ipfs");
ASSERT_EQ(host, "solarsystem");
absl::ConsumePrefix(&path, "/");
*parsed_path = string(path);
}
std::map<string, std::set<string>> celestial_bodies_ = {
std::pair<string, std::set<string>>(
"", {"Mercury", "Venus", "Earth", "Mars", "Jupiter", "Saturn",
"Uranus", "Neptune"}),
std::pair<string, std::set<string>>("Mercury", {}),
std::pair<string, std::set<string>>("Venus", {}),
std::pair<string, std::set<string>>("Earth", {"Moon"}),
std::pair<string, std::set<string>>("Mars", {}),
std::pair<string, std::set<string>>("Jupiter",
{"Europa", "Io", "Ganymede"}),
std::pair<string, std::set<string>>("Saturn", {}),
std::pair<string, std::set<string>>("Uranus", {}),
std::pair<string, std::set<string>>("Neptune", {}),
std::pair<string, std::set<string>>("Earth/Moon", {}),
std::pair<string, std::set<string>>("Jupiter/Europa", {}),
std::pair<string, std::set<string>>("Jupiter/Io", {}),
std::pair<string, std::set<string>>("Jupiter/Ganymede", {})};
};
string Match(InterPlanetaryFileSystem* ipfs, const string& suffix_pattern) {
std::vector<string> results;
Status s = ipfs->GetMatchingPaths(ipfs->JoinPath(kPrefix, suffix_pattern),
nullptr, &results);
if (!s.ok()) {
return s.ToString();
} else {
std::vector<StringPiece> trimmed_results;
std::sort(results.begin(), results.end());
for (const string& result : results) {
StringPiece trimmed_result(result);
EXPECT_TRUE(
absl::ConsumePrefix(&trimmed_result, strings::StrCat(kPrefix, "/")));
trimmed_results.push_back(trimmed_result);
}
return absl::StrJoin(trimmed_results, ",");
}
}
TEST(InterPlanetaryFileSystemTest, IPFSMatch) {
InterPlanetaryFileSystem ipfs;
EXPECT_EQ(Match(&ipfs, "thereisnosuchfile"), "");
EXPECT_EQ(Match(&ipfs, "*"),
"Earth,Jupiter,Mars,Mercury,Neptune,Saturn,Uranus,Venus");
EXPECT_EQ(Match(&ipfs, "Jupiter*"),
"Earth/Moon,Jupiter/Europa,Jupiter/Ganymede,Jupiter/Io");
TF_EXPECT_OK(ipfs.CreateDir(ipfs.JoinPath(kPrefix, "Planet0"), nullptr));
TF_EXPECT_OK(ipfs.CreateDir(ipfs.JoinPath(kPrefix, "Planet1"), nullptr));
EXPECT_EQ(Match(&ipfs, "Planet[0-1]"), "Planet0,Planet1");
EXPECT_EQ(Match(&ipfs, "Planet?"), "Planet0,Planet1");
}
TEST(InterPlanetaryFileSystemTest, MatchSimple) {
InterPlanetaryFileSystem ipfs;
TF_EXPECT_OK(ipfs.CreateDir(ipfs.JoinPath(kPrefix, "match-00"), nullptr));
TF_EXPECT_OK(ipfs.CreateDir(ipfs.JoinPath(kPrefix, "match-0a"), nullptr));
TF_EXPECT_OK(ipfs.CreateDir(ipfs.JoinPath(kPrefix, "match-01"), nullptr));
TF_EXPECT_OK(ipfs.CreateDir(ipfs.JoinPath(kPrefix, "match-aaa"), nullptr));
EXPECT_EQ(Match(&ipfs, "match-*"), "match-00,match-01,match-0a,match-aaa");
EXPECT_EQ(Match(&ipfs, "match-0[0-9]"), "match-00,match-01");
EXPECT_EQ(Match(&ipfs, "match-?[0-9]"), "match-00,match-01");
EXPECT_EQ(Match(&ipfs, "match-?a*"), "match-0a,match-aaa");
EXPECT_EQ(Match(&ipfs, "match-??"), "match-00,match-01,match-0a");
}
TEST(InterPlanetaryFileSystemTest, MatchOnlyNeeded) {
InterPlanetaryFileSystem ipfs;
TF_EXPECT_OK(ipfs.CreateDir(ipfs.JoinPath(kPrefix, "abcd"), nullptr));
TF_EXPECT_OK(
ipfs.CreateDir(ipfs.JoinPath(kPrefix, "evil_directory"), nullptr));
EXPECT_EQ(Match(&ipfs, "abcd"), "abcd");
}
TEST(InterPlanetaryFileSystemTest, MatchDirectory) {
InterPlanetaryFileSystem ipfs;
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-00/abc/x"), nullptr));
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-0a/abc/x"), nullptr));
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-01/abc/x"), nullptr));
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-aaa/abc/x"), nullptr));
EXPECT_EQ(Match(&ipfs, "match-*/abc/x"),
"match-00/abc/x,match-01/abc/x,match-0a/abc/x,match-aaa/abc/x");
EXPECT_EQ(Match(&ipfs, "match-0[0-9]/abc/x"),
"match-00/abc/x,match-01/abc/x");
EXPECT_EQ(Match(&ipfs, "match-?[0-9]/abc/x"),
"match-00/abc/x,match-01/abc/x");
EXPECT_EQ(Match(&ipfs, "match-?a*/abc/x"), "match-0a/abc/x,match-aaa/abc/x");
EXPECT_EQ(Match(&ipfs, "match-?[^a]/abc/x"), "match-00/abc/x,match-01/abc/x");
}
TEST(InterPlanetaryFileSystemTest, MatchMultipleWildcards) {
InterPlanetaryFileSystem ipfs;
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-00/abc/00"), nullptr));
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-00/abc/01"), nullptr));
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-00/abc/09"), nullptr));
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-01/abc/00"), nullptr));
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-01/abc/04"), nullptr));
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-01/abc/10"), nullptr));
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(
ipfs.JoinPath(kPrefix, "match-02/abc/00"), nullptr));
EXPECT_EQ(Match(&ipfs, "match-0[0-1]/abc/0[0-8]"),
"match-00/abc/00,match-00/abc/01,match-01/abc/00,match-01/abc/04");
}
TEST(InterPlanetaryFileSystemTest, RecursivelyCreateAlreadyExistingDir) {
InterPlanetaryFileSystem ipfs;
const string dirname = ipfs.JoinPath(kPrefix, "match-00/abc/00");
TF_EXPECT_OK(ipfs.RecursivelyCreateDir(dirname));
}
TEST(InterPlanetaryFileSystemTest, HasAtomicMove) {
InterPlanetaryFileSystem ipfs;
const string dirname = io::JoinPath(kPrefix, "match-00/abc/00");
bool has_atomic_move;
TF_EXPECT_OK(ipfs.HasAtomicMove(dirname, &has_atomic_move));
EXPECT_EQ(has_atomic_move, true);
}
TEST(InterPlanetaryFileSystemTest, CanCreateTempFile) {
InterPlanetaryFileSystem ipfs;
const string dirname = io::JoinPath(kPrefix, "match-00/abc/00");
bool can_create_temp_file;
TF_EXPECT_OK(ipfs.CanCreateTempFile(dirname, &can_create_temp_file));
EXPECT_EQ(can_create_temp_file, true);
}
class TestFileSystem : public NullFileSystem {
public:
Status IsDirectory(const string& dirname, TransactionToken* token) override {
if (dirname == "." || dirname.empty()) {
return absl::OkStatus();
}
return Status(absl::StatusCode::kFailedPrecondition, "Not a dir");
}
Status GetChildren(const string& dir, TransactionToken* token,
std::vector<string>* result) override {
if (dir == "." || dir.empty()) {
result->push_back("test");
}
return absl::OkStatus();
}
};
TEST(TestFileSystemTest, RootDirectory) {
TestFileSystem fs;
std::vector<string> results;
auto ret = fs.GetMatchingPaths("./te*", nullptr, &results);
EXPECT_EQ(1, results.size());
EXPECT_EQ("./test", results[0]);
ret = fs.GetMatchingPaths("te*", nullptr, &results);
EXPECT_EQ(1, results.size());
EXPECT_EQ("./test", results[0]);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/file_system.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/file_system_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
08422a02-e75f-4e3b-88eb-cda17385701d | cpp | tensorflow/tensorflow | while_loop_fusible_sinking | third_party/xla/xla/service/while_loop_fusible_sinking.cc | third_party/xla/xla/service/while_loop_fusible_sinking_test.cc | #include "xla/service/while_loop_fusible_sinking.h"
#include <cstdint>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/while_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
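// Ops that materialize a large value from little underlying data: broadcasts,
// scalar constants and iota. These are cheap to recompute inside the loop.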
bool IsPurelyExpanding(const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kBroadcast ||
(instr->opcode() == HloOpcode::kConstant &&
instr->shape().rank() == 0) ||
instr->opcode() == HloOpcode::kIota;
}
bool IsFusionCandidate(const HloInstruction* instr) {
return instr->opcode() != HloOpcode::kRng &&
(instr->IsElementwise() || instr->opcode() == HloOpcode::kReshape ||
instr->opcode() == HloOpcode::kTranspose);
}
}
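// DFS over the operand graph of `while_operand`: the value is sinkable only
// if every node is fusible and every non-expanding node is elementwise,
// reshape or transpose. Gives up on shared graphs past ~100 visited nodes.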
bool WhileLoopFusibleSinking::IsSinkableFusion(HloInstruction* while_operand) {
absl::InlinedVector<HloInstruction*, 8> worklist;
absl::flat_hash_set<int> visited;
worklist.push_back(while_operand);
while (!worklist.empty()) {
HloInstruction* to_process = worklist.back();
worklist.pop_back();
if (!to_process->IsFusible()) {
return false;
}
if (!visited.insert(to_process->unique_id()).second) {
if (visited.size() > 100) {
return false;
}
continue;
}
if (IsPurelyExpanding(to_process)) {
continue;
}
if (IsFusionCandidate(to_process)) {
for (auto* op : to_process->operands()) {
worklist.push_back(op);
}
continue;
}
return false;
}
return true;
}
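// Wraps `while_operand` in a loop fusion, first fusing all non-expanding
// candidate operands, then fusing the purely expanding producers so only the
// small leaf operands remain as fusion parameters.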
HloInstruction* WhileLoopFusibleSinking::CreateSinkableFusion(
HloInstruction* while_operand) {
HloInstruction* fusion =
while_operand->AddInstruction(while_operand->CreateFusion(
while_operand->shape(), HloInstruction::FusionKind::kLoop,
while_operand));
bool did_fuse = IsFusionCandidate(while_operand);
while (did_fuse) {
did_fuse = false;
for (int64_t i = fusion->operand_count() - 1; i >= 0; --i) {
HloInstruction* op = fusion->mutable_operand(i);
if (IsPurelyExpanding(op)) {
continue;
}
fusion->FuseInstruction(op);
did_fuse = true;
break;
}
}
did_fuse = true;
while (did_fuse) {
did_fuse = false;
for (int64_t i = fusion->operand_count() - 1; i >= 0; --i) {
HloInstruction* op = fusion->mutable_operand(i);
if (IsPurelyExpanding(op)) {
fusion->FuseInstruction(op);
did_fuse = true;
break;
}
}
}
return fusion;
}
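// For each loop-invariant tuple element whose init value is a sinkable
// fusion, appends the fusion's small operands to the loop state and rebuilds
// (defuses) the computation inside the body, replacing the invariant GTE.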
absl::StatusOr<bool> WhileLoopFusibleSinking::TrySinkingFusiblesIntoWhileLoop(
HloInstruction* while_instr) {
HloComputation* while_cond = while_instr->while_condition();
HloComputation* while_body = while_instr->while_body();
if (call_counts_[while_body] > 1 || call_counts_[while_cond] > 1) {
return false;
}
HloInstruction* init_value = while_instr->mutable_operand(0);
if (init_value->opcode() != HloOpcode::kTuple) {
return false;
}
bool changed = false;
std::vector<HloInstruction*> invariant_body_gtes =
WhileUtil::GetInvariantGTEsForWhileBody(*while_body);
std::vector<int64_t> tuple_indices;
std::vector<HloInstruction*> new_operands;
for (HloInstruction* invariant_body_gte : invariant_body_gtes) {
int64_t index = invariant_body_gte->tuple_index();
if (while_instr->operand_count() == 0 || init_value->operand_count() == 0) {
CHECK_EQ(while_instr->user_count(), 0);
VLOG(3) << "Each element in the operand tuple of the while instruction '"
<< while_instr->name()
<< "' was an invariant value, whose usage has been replaced "
" directly by the value.";
break;
}
HloInstruction* invariant_value = init_value->mutable_operand(index);
if (absl::c_any_of(invariant_body_gte->users(),
[](const HloInstruction* use) {
switch (use->opcode()) {
case HloOpcode::kDynamicSlice:
case HloOpcode::kGather:
case HloOpcode::kSlice:
return true;
default:
return false;
}
})) {
continue;
}
if (init_value->IsRoot() || init_value->user_count() > 1) {
init_value = init_value->AddInstruction(init_value->Clone());
TF_RETURN_IF_ERROR(while_instr->ReplaceOperandWith(0, init_value));
}
if (!IsSinkableFusion(invariant_value)) {
continue;
}
HloInstruction* fusion = CreateSinkableFusion(invariant_value);
changed = true;
if (fusion->operand_count() > 0 &&
(while_instr->IsRoot() ||
absl::c_any_of(while_instr->users(), [&](HloInstruction* use) {
return use->opcode() != HloOpcode::kGetTupleElement;
}))) {
auto uses = while_instr->users();
std::vector<HloInstruction*> gtes(init_value->operand_count());
for (int64_t i = 0; i < gtes.size(); ++i) {
gtes[i] = while_instr->AddInstruction(
HloInstruction::CreateGetTupleElement(while_instr, i));
}
HloInstruction* tuple =
while_instr->AddInstruction(HloInstruction::CreateTuple(gtes));
if (while_instr->IsRoot()) {
while_instr->parent()->set_root_instruction(tuple);
}
if (!uses.empty()) {
TF_RETURN_IF_ERROR(while_instr->ReplaceUsesWith(uses, tuple));
}
}
absl::InlinedVector<HloInstruction*, 2> invariant_output_uses;
for (auto use : while_instr->users()) {
if (use->opcode() == HloOpcode::kGetTupleElement &&
use->tuple_index() == index) {
invariant_output_uses.push_back(use);
}
}
for (auto use : invariant_output_uses) {
TF_RETURN_IF_ERROR(
while_instr->parent()->ReplaceInstruction(use, invariant_value));
}
HloInstruction* root = while_body->root_instruction();
HloInstruction* parameter = while_body->parameter_instruction(0);
tuple_indices.resize(fusion->operand_count());
int64_t next_index = init_value->operand_count();
new_operands.resize(fusion->operand_count());
for (int64_t i = 0; i < fusion->operand_count(); ++i) {
init_value->AppendOperand(fusion->mutable_operand(i));
parameter->mutable_shape()->mutable_tuple_shapes()->push_back(
fusion->mutable_operand(i)->shape());
new_operands[i] = root->AddInstruction(
HloInstruction::CreateGetTupleElement(parameter, next_index++));
root->AppendOperand(new_operands[i]);
}
*(init_value->mutable_shape()) = parameter->shape();
*(while_instr->mutable_shape()) = parameter->shape();
*(while_cond->parameter_instruction(0)->mutable_shape()) =
parameter->shape();
*(root->mutable_shape()) = parameter->shape();
auto cloned_fusion = while_body->AddInstruction(
fusion->CloneWithNewOperands(fusion->shape(), new_operands));
TF_RETURN_IF_ERROR(fusion->parent()->RemoveInstruction(fusion));
TF_RETURN_IF_ERROR(
while_body->ReplaceInstruction(invariant_body_gte, cloned_fusion));
TF_RETURN_IF_ERROR(cloned_fusion->Defuse());
}
return changed;
}
absl::StatusOr<bool> WhileLoopFusibleSinking::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
call_counts_.clear();
bool changed = false;
std::vector<HloInstruction*> while_instrs;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
absl::c_copy_if(comp->instructions(), std::back_inserter(while_instrs),
HloPredicateIsOp<HloOpcode::kWhile>);
}
for (HloInstruction* while_instr : while_instrs) {
call_counts_[while_instr->while_body()]++;
call_counts_[while_instr->while_condition()]++;
}
for (HloInstruction* while_instr : while_instrs) {
TF_ASSIGN_OR_RETURN(bool result,
TrySinkingFusiblesIntoWhileLoop(while_instr));
changed |= result;
}
return changed;
}
} | #include "xla/service/while_loop_fusible_sinking.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using ::testing::_;
using WhileLoopFusibleSinkingTest = HloTestBase;
TEST_F(WhileLoopFusibleSinkingTest, SinkOneFusible) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2],f32[2]) parameter(0)
p_body.0 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=0
p_body.1 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=1
add.0 = f32[2] add(p_body.0, p_body.1)
ROOT root = (f32[2],f32[2]) tuple(add.0, p_body.1)
}
condition {
p_cond = (f32[2],f32[2]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2] parameter(0)
const_1 = f32[2] iota(), iota_dimension=0
while_init = (f32[2],f32[2]) tuple(const_0, const_1)
ROOT while = (f32[2],f32[2]) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopFusibleSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_body = module->GetComputationWithName("body");
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::Add(_, op::Iota()), _));
}
TEST_F(WhileLoopFusibleSinkingTest, SinkMask) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[5,7],f32[5,7]) parameter(0)
p_body.0 = get-tuple-element(p_body), index=0
p_body.1 = get-tuple-element(p_body), index=1
add.0 = add(p_body.0, p_body.1)
ROOT root = tuple(add.0, p_body.1)
}
condition {
p_cond = (f32[5,7],f32[5,7]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[5,7] parameter(0)
p = f32[5] parameter(1)
a = f32[5,7] iota(), iota_dimension=0
b = f32[5,7] iota(), iota_dimension=1
c = add(a, b)
d = f32[5,7] broadcast(p), dimensions={0}
mask = multiply(c,d)
while_init = tuple(const_0, mask)
ROOT while = while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopFusibleSinking{}.Run(module.get()));
ASSERT_TRUE(changed);
auto* while_body = module->GetComputationWithName("body");
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::Add(_, op::Multiply(op::Add(op::Iota(), op::Iota()),
op::Broadcast())),
_, _));
}
TEST_F(WhileLoopFusibleSinkingTest, NoSinkSlicedMask) {
const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[5,7],f32[5,7]) parameter(0)
p_body.0 = get-tuple-element(p_body), index=0
p_body.1 = get-tuple-element(p_body), index=1
z = s32[] constant(0)
j = s32[] constant(3)
ds = f32[1,7] dynamic-slice(p_body.1, j, z), dynamic_slice_sizes={1,7}
r = f32[7] reshape(ds)
b = f32[5,7] broadcast(r), dimensions={1}
a = add(b, p_body.0)
add.0 = add(a, p_body.1)
ROOT root = tuple(add.0, p_body.1)
}
condition {
p_cond = (f32[5,7],f32[5,7]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[5,7] parameter(0)
p = f32[5] parameter(1)
a = f32[5,7] iota(), iota_dimension=0
b = f32[5,7] iota(), iota_dimension=1
c = add(a, b)
d = f32[5,7] broadcast(p), dimensions={0}
mask = multiply(c,d)
while_init = tuple(const_0, mask)
ROOT while = while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopFusibleSinking{}.Run(module.get()));
EXPECT_FALSE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_fusible_sinking.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_fusible_sinking_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2467c13b-8b56-471c-b969-75f56b4ce64b | cpp | tensorflow/tensorflow | xla_sharding_util | tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.cc | tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util_test.cc | #include "tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/LogicalResult.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "xla/service/hlo_parser.h"
#include "xla/tsl/lib/math/math_util.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
namespace tensorflow {
namespace {
constexpr char kNumSplitAttr[] = "num_split";
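// Returns how much `split_dim` must be padded so that the dimension divides
// evenly into `num_splits` equal shards; 0 for unknown (<= 0) dimensions.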
int64_t GetPadding(const int split_dim, const int num_splits,
const PartialTensorShape& partial_tensor_shape) {
if (partial_tensor_shape.dim_size(split_dim) <= 0) {
return 0;
}
int64_t per_split_size = tsl::MathUtil::CeilOfRatio<int64_t>(
partial_tensor_shape.dim_size(split_dim), num_splits);
int64_t total_padding =
per_split_size * num_splits - partial_tensor_shape.dim_size(split_dim);
return total_padding;
}
mlir::TF::SliceOp CreateSliceOp(mlir::OpBuilder* builder,
const mlir::Location& location,
mlir::Value input,
const PartialTensorShape& shape) {
mlir::SmallVector<int64_t, 4> slice_start_position;
for (int i = 0; i < shape.dims(); ++i) {
slice_start_position.push_back(0);
}
mlir::SmallVector<int64_t, 4> slice_size;
for (int i = 0; i < shape.dims(); ++i) {
slice_size.push_back(shape.dim_size(i));
}
auto start_position_type =
mlir::RankedTensorType::get(shape.dims(), builder->getIntegerType(64));
auto start_position_op = builder->create<mlir::TF::ConstOp>(
input.getLoc(), mlir::DenseIntElementsAttr::get(start_position_type,
slice_start_position));
auto slice_size_op = builder->create<mlir::TF::ConstOp>(
input.getLoc(), mlir::DenseIntElementsAttr::get(
mlir::RankedTensorType::get(
shape.dims(), builder->getIntegerType(64)),
slice_size));
auto slice_result_type =
mlir::RankedTensorType::get(slice_size, getElementTypeOrSelf(input));
return builder->create<mlir::TF::SliceOp>(input.getLoc(), slice_result_type,
input, start_position_op,
slice_size_op);
}
mlir::TF::PadOp CreatePadOp(mlir::OpBuilder* builder,
const mlir::Location& location, int64_t num_dims,
int64_t split_dim, mlir::Value src_input,
int64_t padding) {
auto input_type = mlir::cast<mlir::TensorType>(src_input.getType());
llvm::SmallVector<int64_t, 4> padding_values;
std::vector<int64_t> padded_shape;
for (int i = 0; i < num_dims; ++i) {
padding_values.push_back(0);
if (i == split_dim) {
padding_values.push_back(padding);
padded_shape.push_back(input_type.getShape()[i] + padding);
} else {
padding_values.push_back(0);
padded_shape.push_back(input_type.getShape()[i]);
}
}
auto padding_type =
mlir::RankedTensorType::get({num_dims, 2}, builder->getIntegerType(64));
auto paddings = mlir::DenseIntElementsAttr::get(padding_type, padding_values);
auto paddings_value = builder->create<mlir::TF::ConstOp>(location, paddings);
mlir::SmallVector<int64_t, 4> expand_shape(padded_shape.begin(),
padded_shape.end());
auto expand_result_type =
mlir::RankedTensorType::get(expand_shape, input_type.getElementType());
return builder->create<mlir::TF::PadOp>(location, expand_result_type,
src_input, paddings_value);
}
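// Pads `src_input` along `split_dimension` if needed, then emits a tf.Split
// producing `num_split` equal slices; fails if a static dimension is not
// evenly divisible.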
mlir::LogicalResult CreateSplitOp(
const int num_split, const int split_dimension, const int64_t padding,
const mlir::Location& location, mlir::Value src_input,
mlir::OpBuilder* builder, mlir::TF::SplitOp* split_op,
bool is_ici_weight_dist_spmd) {
if (padding > 0) {
int64_t num_dims =
mlir::cast<mlir::TensorType>(src_input.getType()).getRank();
auto pad_op = CreatePadOp(builder, location, num_dims, split_dimension,
src_input, padding);
if (is_ici_weight_dist_spmd) {
pad_op->setAttr(kICIWeightDistributionMlirBridgeMarker,
builder->getBoolAttr(true));
}
src_input = pad_op.getResult();
}
auto split_dim_type =
mlir::RankedTensorType::get({}, builder->getIntegerType(32));
auto split_dimension_attr =
mlir::DenseElementsAttr::get(split_dim_type, split_dimension);
mlir::Type output_type;
auto input_type = mlir::cast<mlir::TensorType>(src_input.getType());
if (input_type.hasRank()) {
if (input_type.getShape()[split_dimension] == mlir::ShapedType::kDynamic) {
output_type = input_type;
} else {
auto shape = llvm::to_vector<4>(input_type.getShape());
if (shape[split_dimension] % num_split != 0) {
return mlir::emitError(
location,
llvm::formatv(
"incorrect input sharding configuration received. "
"{0}-th dimension of the input must be evenly divisible by {1}",
split_dimension, num_split));
}
shape[split_dimension] = shape[split_dimension] / num_split;
output_type =
mlir::RankedTensorType::get(shape, input_type.getElementType());
}
} else {
output_type = input_type;
}
auto split_dimension_op = builder->create<mlir::TF::ConstOp>(
location, split_dim_type, split_dimension_attr);
if (is_ici_weight_dist_spmd) {
split_dimension_op->setAttr(kICIWeightDistributionMlirBridgeMarker,
builder->getBoolAttr(true));
}
llvm::SmallVector<mlir::Type, 4> output_types(num_split, output_type);
*split_op = builder->create<mlir::TF::SplitOp>(
location, output_types, split_dimension_op.getOutput(), src_input);
(*split_op)->setAttr(
kNumSplitAttr,
builder->getIntegerAttr(builder->getIntegerType(32), num_split));
if (is_ici_weight_dist_spmd) {
(*split_op)->setAttr(kICIWeightDistributionMlirBridgeMarker,
builder->getBoolAttr(true));
}
return mlir::success();
}
mlir::TF::ConcatOp CreateConcatOp(const int concat_dimension,
const mlir::Location& location,
const int64_t padding,
mlir::ArrayRef<mlir::Value> inputs,
mlir::OpBuilder* builder) {
auto concat_dim_type =
mlir::RankedTensorType::get({}, builder->getIntegerType(32));
auto concat_dimension_attr =
mlir::DenseElementsAttr::get(concat_dim_type, concat_dimension);
auto concat_dimension_op = builder->create<mlir::TF::ConstOp>(
location, concat_dim_type, concat_dimension_attr);
mlir::Type output_type;
auto input_type = mlir::cast<mlir::TensorType>(inputs[0].getType());
if (input_type.hasRank()) {
if (input_type.getShape()[concat_dimension] == mlir::ShapedType::kDynamic) {
output_type = input_type;
} else {
auto shape = llvm::to_vector<4>(input_type.getShape());
shape[concat_dimension] = shape[concat_dimension] * inputs.size();
output_type =
mlir::RankedTensorType::get(shape, input_type.getElementType());
}
} else {
output_type = input_type;
}
return builder->create<mlir::TF::ConcatOp>(
location, output_type, concat_dimension_op.getOutput(), inputs);
}
mlir::TF::XlaConcatNDOp CreateXlaConcatNDOp(
const mlir::Location& location, mlir::ArrayRef<mlir::Value> inputs,
const std::vector<int64_t>& num_concats,
const std::vector<int64_t>& paddings, mlir::OpBuilder& builder) {
llvm::SmallVector<int64_t, 4> output_shape;
if (inputs.empty()) {
mlir::emitError(location, "inputs list to concat ops is empty");
return nullptr;
}
if (num_concats.size() != paddings.size()) {
mlir::emitError(location,
"num_concats and paddings must be of the same length.");
return nullptr;
}
auto input_slice_type = mlir::cast<mlir::TensorType>(inputs[0].getType());
auto element_type = input_slice_type.getElementType();
mlir::Type output_type;
if (input_slice_type.hasRank()) {
const auto& slice_shape = input_slice_type.getShape();
for (int i = 0; i < num_concats.size(); i++) {
auto num_concat = num_concats[i];
const int max_dim_size = slice_shape[i] * num_concat;
output_shape.emplace_back(max_dim_size - paddings[i]);
}
VLOG(2) << "SL: CreateXlaConcatNDOp. output_shape="
<< absl::StrJoin(output_shape, ",")
<< ", Padding=" << absl::StrJoin(paddings, ",");
output_type = mlir::RankedTensorType::get(output_shape, element_type);
} else {
output_type = input_slice_type;
}
auto op = builder.create<mlir::TF::XlaConcatNDOp>(
location, output_type, inputs, builder.getI64ArrayAttr(num_concats),
builder.getI64ArrayAttr(paddings));
return op;
}
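// Emits an XlaSplitND op that shards a ranked tensor into prod(num_splits)
// equal slices, with per-dimension paddings.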
mlir::LogicalResult CreateXlaSplitNDOp(const mlir::Location& location,
mlir::Value src_input,
const std::vector<int64_t>& num_splits,
const std::vector<int64_t>& paddings,
mlir::OpBuilder* builder,
mlir::TF::XlaSplitNDOp* xla_split_op,
bool is_ici_weight_dist_spmd) {
auto input_type = mlir::cast<mlir::TensorType>(src_input.getType());
mlir::Type output_type;
if (!input_type.hasRank()) {
mlir::emitError(
location,
"XLA Split/Concat ops are supported only for Ranked Tensors.");
return mlir::failure();
}
const int rank = input_type.getRank();
const auto& input_shape = input_type.getShape();
auto output_slice_shape = llvm::to_vector<4>(input_type.getShape());
int num_tiles = 1;
if (num_splits.size() != rank || num_splits.size() != paddings.size()) {
return mlir::failure();
}
for (int i = 0; i < rank; ++i) {
if (input_shape[i] == mlir::ShapedType::kDynamic) {
output_slice_shape[i] = input_shape[i];
} else {
output_slice_shape[i] = ((input_shape[i] + paddings[i]) / num_splits[i]);
}
num_tiles *= num_splits[i];
}
output_type = mlir::RankedTensorType::get(output_slice_shape,
input_type.getElementType());
llvm::SmallVector<mlir::Type, 4> output_types(num_tiles, output_type);
VLOG(2) << "SL: CreateXlaSplitNDOp. input_shape="
<< absl::StrJoin(input_shape, ",")
<< ", Padding: " << absl::StrJoin(paddings, ",");
*xla_split_op = builder->create<mlir::TF::XlaSplitNDOp>(
location, output_types, src_input, builder->getI64ArrayAttr(num_splits),
builder->getI64ArrayAttr(paddings));
if (is_ici_weight_dist_spmd) {
(*xla_split_op)
->setAttr(kICIWeightDistributionMlirBridgeMarker,
builder->getBoolAttr(true));
}
return mlir::success();
}
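// Returns true iff `type` is ranked with rank >= 1 and every dimension is
// statically known. Unranked and rank-0 types both report false.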
bool IsShapeKnown(mlir::TensorType type) {
if (!type.hasRank()) return false;
bool shape_known = false;
for (int i = 0; i < type.getRank(); ++i) {
if (type.getShape()[i] == mlir::ShapedType::kDynamic) {
shape_known = false;
break;
} else {
shape_known = true;
}
}
return shape_known;
}
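// Tiles `original_source` for a tile-sharded input using a single
// tf.XlaSplitND op. The slices come out in row-major tile order; under
// partial replication (replicate_on_last_tile_dim) each slice is repeated
// once per replica so `tiled_inputs` lines up with the tile assignment.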
mlir::LogicalResult HandleTileShardedInputsUsingXlaSplitOps(
const mlir::Location& location, const xla::OpSharding& input_sharding,
const mlir::Value& original_source, mlir::OpBuilder* builder,
llvm::SmallVectorImpl<mlir::Value>* tiled_inputs,
bool is_ici_weight_dist_spmd) {
std::vector<int64_t> num_splits(
input_sharding.tile_assignment_dimensions().begin(),
input_sharding.replicate_on_last_tile_dim()
? std::prev(input_sharding.tile_assignment_dimensions().end())
: input_sharding.tile_assignment_dimensions().end());
const int rank = input_sharding.replicate_on_last_tile_dim()
? input_sharding.tile_assignment_dimensions_size() - 1
: input_sharding.tile_assignment_dimensions_size();
std::vector<int64_t> paddings;
paddings.reserve(rank);
  auto shape = llvm::to_vector<4>(
      mlir::cast<mlir::TensorType>(original_source.getType()).getShape());
for (int dim = 0; dim < rank; ++dim) {
paddings.push_back(
GetPadding(dim, input_sharding.tile_assignment_dimensions(dim),
PartialTensorShape(shape)));
}
mlir::TF::XlaSplitNDOp xla_split_op;
if (mlir::failed(CreateXlaSplitNDOp(location, original_source, num_splits,
paddings, builder, &xla_split_op,
is_ici_weight_dist_spmd))) {
return mlir::failure();
}
tiled_inputs->clear();
tiled_inputs->reserve(input_sharding.tile_assignment_devices_size());
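  // Under partial replication the last tile dimension is a replication
  // factor: each split result must appear once per replica in the flattened
  // tile order consumed by the caller.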
int64_t repeat_count =
input_sharding.replicate_on_last_tile_dim()
? *input_sharding.tile_assignment_dimensions().rbegin()
: 1;
for (int i = 0; i < xla_split_op.getResults().size(); i++) {
auto split_op_output = xla_split_op.getResults()[i];
for (int64_t j = 0; j < repeat_count; ++j) {
tiled_inputs->push_back(split_op_output);
}
}
return mlir::success();
}
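// Tiles `original_source` with a tree of tf.Split ops, one level per sharded
// dimension: each level splits every output of the previous level along the
// next dimension. The leaves are collected in split order and, under partial
// replication, repeated once per replica on the last tile dimension.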
mlir::LogicalResult HandleTileShardedInputsUsingTfSplitOps(
const mlir::Location& location, const xla::OpSharding& input_sharding,
const mlir::Value& original_source, mlir::OpBuilder* builder,
llvm::SmallVectorImpl<mlir::Value>* tiled_inputs,
bool is_ici_weight_dist_spmd) {
llvm::SmallVector<mlir::TF::SplitOp, 4> split_ops_for_tiled_input;
split_ops_for_tiled_input.reserve(
input_sharding.tile_assignment_devices_size());
auto dimension_to_splits_map =
GetDimensionIndicesAndNumSplitsFromSharding(input_sharding);
if (!dimension_to_splits_map.ok()) {
LOG(ERROR) << dimension_to_splits_map.status();
return mlir::failure();
}
PartialTensorShape shape;
const auto input_type =
mlir::cast<mlir::TensorType>(original_source.getType());
bool input_shape_known = IsShapeKnown(input_type);
if (input_shape_known) {
shape = PartialTensorShape(input_type.getShape());
}
for (const auto& dimension_and_num_splits : *dimension_to_splits_map) {
const int dimension = dimension_and_num_splits.first;
const int num_splits = dimension_and_num_splits.second;
int padding = input_shape_known
? GetPadding(dimension, num_splits,
PartialTensorShape(input_type.getShape()))
: 0;
if (split_ops_for_tiled_input.empty()) {
mlir::TF::SplitOp root_split_op;
auto result = CreateSplitOp(num_splits, dimension, padding, location,
original_source, builder, &root_split_op,
is_ici_weight_dist_spmd);
if (mlir::failed(result)) return mlir::failure();
split_ops_for_tiled_input.emplace_back(root_split_op);
continue;
}
llvm::SmallVector<mlir::TF::SplitOp, 4> new_split_ops;
new_split_ops.reserve(split_ops_for_tiled_input.size() * num_splits);
for (auto split_op : split_ops_for_tiled_input) {
for (auto parent_split_output_value : split_op.getResults()) {
mlir::TF::SplitOp child_split_op;
auto result = CreateSplitOp(num_splits, dimension, padding, location,
parent_split_output_value, builder,
&child_split_op, is_ici_weight_dist_spmd);
if (mlir::failed(result)) return mlir::failure();
new_split_ops.emplace_back(child_split_op);
}
}
std::swap(new_split_ops, split_ops_for_tiled_input);
}
tiled_inputs->clear();
tiled_inputs->reserve(input_sharding.tile_assignment_devices_size());
for (auto split_op : split_ops_for_tiled_input) {
for (auto split_op_output : split_op.getResults()) {
int64_t repeat_count =
input_sharding.replicate_on_last_tile_dim()
? *input_sharding.tile_assignment_dimensions().rbegin()
: 1;
for (int64_t i = 0; i < repeat_count; ++i) {
tiled_inputs->push_back(split_op_output);
}
}
}
return mlir::success();
}
bool UnsupportedPartitionedShardingType(xla::OpSharding::Type sharding) {
return sharding != xla::OpSharding::REPLICATED &&
sharding != xla::OpSharding::OTHER;
}
}
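// Maps each dimension that is actually split (tile count > 1) to its number
// of splits, skipping the trailing replication dimension. Returns an error
// if the sharding splits nothing.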
absl::StatusOr<std::map<int, int>> GetDimensionIndicesAndNumSplitsFromSharding(
const xla::OpSharding& sharding) {
int64_t tensor_tile_rank = sharding.tile_assignment_dimensions_size();
if (sharding.replicate_on_last_tile_dim()) {
tensor_tile_rank--;
}
std::map<int, int> dimension_to_splits_map;
for (int dim_index = 0; dim_index < tensor_tile_rank; ++dim_index) {
if (sharding.tile_assignment_dimensions(dim_index) > 1) {
dimension_to_splits_map.emplace(
dim_index, sharding.tile_assignment_dimensions(dim_index));
}
}
if (dimension_to_splits_map.empty()) {
return absl::InvalidArgumentError(absl::StrCat(
"Arg has unnecessary tiled sharding: ", sharding.DebugString()));
}
return dimension_to_splits_map;
}
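// Returns the number of tensor dimensions covered by a tiled sharding,
// excluding the trailing replication dimension and any last_tile_dims.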
int GetDimsFromXLAShardingTiled(const xla::OpSharding& xla_sharding) {
return xla_sharding.tile_assignment_dimensions_size() -
(xla_sharding.replicate_on_last_tile_dim() ? 1 : 0) -
xla_sharding.last_tile_dims_size();
}
bool IsOtherReplicatedSharding(const xla::OpSharding& xla_sharding) {
int max_dim = GetDimsFromXLAShardingTiled(xla_sharding);
for (int i = 0; i < max_dim; ++i) {
if (xla_sharding.tile_assignment_dimensions(i) != 1) {
return false;
}
}
return xla_sharding.type() == xla::OpSharding::OTHER &&
(xla_sharding.replicate_on_last_tile_dim() ||
!xla_sharding.last_tile_dims().empty());
}
bool IsSplitSharding(const xla::OpSharding& sharding) {
return sharding.type() == xla::OpSharding::OTHER &&
!IsOtherReplicatedSharding(sharding);
}
bool IsReplicatedSharding(const xla::OpSharding& sharding) {
return sharding.type() == xla::OpSharding::REPLICATED ||
IsOtherReplicatedSharding(sharding);
}
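// Parses `shard_str` either as a serialized xla.OpSharding proto or, failing
// that, as a human-readable HLO sharding string; optionally reports the
// parse error.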
mlir::LogicalResult DecodeShardingAttribute(const std::string& shard_str,
xla::OpSharding& sharding,
bool report_error) {
if (sharding.ParseFromString(shard_str)) return mlir::success();
absl::StatusOr<xla::HloSharding> sharding_hlo = xla::ParseSharding(shard_str);
if (sharding_hlo.ok()) {
sharding = sharding_hlo->ToProto();
return mlir::success();
}
if (report_error)
llvm::errs() << std::string(sharding_hlo.status().message()) << "\n";
return mlir::failure();
}
mlir::LogicalResult DecodeShardingAttribute(mlir::Attribute shard_attr,
xla::OpSharding& sharding,
bool report_error) {
if (!mlir::isa<mlir::StringAttr>(shard_attr)) return mlir::failure();
auto shard_str = mlir::cast<mlir::StringAttr>(shard_attr).getValue().str();
return DecodeShardingAttribute(shard_str, sharding, report_error);
}
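// Rewrites the attribute named `shard_str` on `op` from its serialized
// OpSharding proto form into the human-readable HloSharding string form;
// does nothing if the attribute is absent or is not a parseable proto.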
void EncodeSharding(mlir::Operation* op, llvm::StringRef shard_str) {
if (!op->hasAttrOfType<mlir::StringAttr>(shard_str)) return;
::xla::OpSharding sharding;
auto sharding_proto_str =
op->getAttrOfType<mlir::StringAttr>(shard_str).getValue().str();
if (!sharding.ParseFromString(sharding_proto_str)) return;
auto hlosharding = xla::HloSharding::FromProto(sharding);
if (!hlosharding.ok()) {
op->emitError("Unable to encode sharding to human readable ")
<< hlosharding.status().message();
return;
}
op->setAttr(shard_str,
mlir::StringAttr::get(op->getContext(), hlosharding->ToString()));
}
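// Convenience overload that lowers tile-sharded inputs with tf.Split ops
// (use_xla_nd_ops = false).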
mlir::LogicalResult ExtractInputsForLogicalDevices(
const int num_cores_per_replica,
mlir::tf_device::ClusterFuncOp cluster_func, mlir::OpBuilder* builder,
llvm::SmallVectorImpl<llvm::SmallVector<mlir::Value, 4>>* input_list) {
  return ExtractInputsForLogicalDevices(num_cores_per_replica, cluster_func,
                                        builder, /*use_xla_nd_ops=*/false,
                                        input_list);
}
mlir::LogicalResult ExtractInputsForLogicalDevices(
const int num_cores_per_replica,
mlir::tf_device::ClusterFuncOp cluster_func, mlir::OpBuilder* builder,
bool use_xla_nd_ops,
llvm::SmallVectorImpl<llvm::SmallVector<mlir::Value, 4>>* input_list) {
input_list->reserve(num_cores_per_replica);
for (int i = 0; i < num_cores_per_replica; ++i)
input_list->emplace_back(llvm::SmallVector<mlir::Value, 4>());
llvm::SmallVector<mlir::Value, 4> cluster_func_inputs(
cluster_func.getOperands());
auto sharding_attrs =
cluster_func.getOperation()->getAttrOfType<mlir::ArrayAttr>(
kInputShardingAttr);
if (!sharding_attrs) {
(*input_list)[0] = cluster_func_inputs;
return mlir::success();
}
for (const auto& sharding_attr_and_index : llvm::enumerate(sharding_attrs)) {
const auto& sharding_attr = sharding_attr_and_index.value();
const auto input_index = sharding_attr_and_index.index();
const auto& input_value = cluster_func_inputs[input_index];
xla::OpSharding sharding;
if (DecodeShardingAttribute(
mlir::cast<mlir::StringAttr>(sharding_attr).getValue().str(),
sharding)
.failed()) {
return cluster_func.emitError("incorrect sharding format for inputs");
}
const auto input_sharding_type = sharding.type();
auto tiled_sharding_mismatched = [&](int tiled_input_size) {
return cluster_func.emitError(
llvm::formatv("incorrect {0}-th tiled input sharding received. "
"Product of tile sharding splits({1}) must be equal to "
"number of logical devices : {2}",
input_index, tiled_input_size, num_cores_per_replica));
};
if (auto partitioned_input =
llvm::dyn_cast_or_null<mlir::TF::TPUPartitionedInputV2Op>(
input_value.getDefiningOp())) {
if (UnsupportedPartitionedShardingType(input_sharding_type))
return cluster_func->emitOpError()
<< "unsupported input sharding type "
<< OpSharding_Type_Name(input_sharding_type) << " for "
<< input_index << "-th input";
if (input_sharding_type == xla::OpSharding::REPLICATED) {
for (const auto& index_and_inputs : llvm::enumerate(*input_list)) {
index_and_inputs.value().emplace_back(
partitioned_input.getOperand(index_and_inputs.index()));
}
} else {
assert(input_sharding_type == xla::OpSharding::OTHER);
if (partitioned_input.getInputs().size() != num_cores_per_replica)
return tiled_sharding_mismatched(
partitioned_input.getInputs().size());
for (int i = 0; i < sharding.tile_assignment_devices_size(); ++i) {
const int assigned_logical_device =
sharding.tile_assignment_devices(i);
(*input_list)[assigned_logical_device].emplace_back(
partitioned_input.getInputs()[i]);
}
}
continue;
}
if (IsSplitSharding(sharding)) {
bool is_ici_weight_dist_spmd =
cluster_func.getOperand(input_index).getDefiningOp() &&
cluster_func.getOperand(input_index)
.getDefiningOp()
->hasAttr(kICIWeightDistributionMlirBridgeMarker);
llvm::SmallVector<mlir::Value, 4> tiled_inputs;
if (use_xla_nd_ops) {
auto result = HandleTileShardedInputsUsingXlaSplitOps(
cluster_func.getLoc(), sharding, input_value, builder,
&tiled_inputs, is_ici_weight_dist_spmd);
if (mlir::failed(result)) return mlir::failure();
} else {
auto result = HandleTileShardedInputsUsingTfSplitOps(
cluster_func.getLoc(), sharding, input_value, builder,
&tiled_inputs, is_ici_weight_dist_spmd);
if (mlir::failed(result)) return mlir::failure();
}
const int64_t tiled_inputs_size = tiled_inputs.size();
if (tiled_inputs_size != num_cores_per_replica)
return tiled_sharding_mismatched(tiled_inputs.size());
for (int i = 0; i < sharding.tile_assignment_devices_size(); ++i) {
const int assigned_logical_device = sharding.tile_assignment_devices(i);
(*input_list)[assigned_logical_device].emplace_back(tiled_inputs[i]);
}
} else if (IsReplicatedSharding(sharding)) {
for (auto& inputs : *input_list) inputs.emplace_back(input_value);
} else {
assert(input_sharding_type == xla::OpSharding::MAXIMAL);
const int logical_device_id = sharding.tile_assignment_devices(0);
(*input_list)[logical_device_id].emplace_back(input_value);
}
}
return mlir::success();
}
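// Parses the output sharding configuration attached to `cluster_func` and
// validates it against `num_cores_per_replica`: tiled outputs must cover all
// logical devices, and maximal outputs must target a valid device id.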
mlir::LogicalResult ParseAndValidateOutputSharding(
const int num_cores_per_replica,
mlir::tf_device::ClusterFuncOp cluster_func,
mlir::SmallVector<xla::OpSharding, 4>* output_sharding_list) {
output_sharding_list->reserve(cluster_func.getNumResults());
const auto output_sharding_attrs =
cluster_func.getOperation()->getAttrOfType<mlir::ArrayAttr>(
kOutputShardingAttr);
if (!output_sharding_attrs)
return cluster_func.emitError(
"output_sharding_configuration missing from cluster func");
if (output_sharding_attrs.size() != cluster_func.getNumResults())
return cluster_func.emitError("incorrect number of output sharding");
for (const auto& output_sharding_and_index :
llvm::enumerate(output_sharding_attrs)) {
const auto& output_sharding = output_sharding_and_index.value();
const int sharding_index = output_sharding_and_index.index();
if (!mlir::isa<mlir::StringAttr>(output_sharding))
return cluster_func.emitError(llvm::formatv(
"non-string output sharding at index {0}", sharding_index));
xla::OpSharding sharding;
if (DecodeShardingAttribute(
mlir::cast<mlir::StringAttr>(output_sharding).getValue().str(),
sharding)
.failed()) {
return cluster_func.emitError("incorrect sharding format for outputs");
}
if (sharding.type() == xla::OpSharding::OTHER &&
sharding.tile_assignment_devices_size() != num_cores_per_replica)
return cluster_func.emitError(llvm::formatv(
"incorrect sharding format for outputs. Number of "
"tiled outputs({0}) must match the number of logical "
"devices({1})",
sharding.tile_assignment_devices_size(), num_cores_per_replica));
if (sharding.type() == xla::OpSharding::MAXIMAL &&
((sharding.tile_assignment_devices(0) >= num_cores_per_replica) ||
(sharding.tile_assignment_devices(0) < 0)))
return cluster_func.emitError(llvm::formatv(
"incorrect sharding format for outputs. Maximal "
"sharding should be assigned to device id in range "
"[0, {0}). Currently assigned to {1}",
num_cores_per_replica, sharding.tile_assignment_devices(0)));
output_sharding_list->emplace_back(std::move(sharding));
}
return mlir::success();
}
namespace {
bool IsAssignedToLogicalDevice(const int core_id,
const xla::OpSharding& sharding) {
return sharding.type() == xla::OpSharding::MAXIMAL &&
sharding.tile_assignment_devices(0) == core_id;
}
mlir::LogicalResult LookupClusterToCoreIndex(
const mlir::Location& location,
llvm::SmallVector<llvm::SmallVector<int, 4>, 4> cluster_to_core_index,
const int core_id, const int cluster_func_output_index,
int* core_output_index) {
*core_output_index =
cluster_to_core_index[core_id][cluster_func_output_index];
if (*core_output_index == -1) {
mlir::emitError(
location,
llvm::formatv("Attempted to map cluster_func output index {0} to "
"program assigned to core {1}. The tensor at this output "
"index was not assigned or sharded to this core.",
cluster_func_output_index, core_id));
return mlir::failure();
}
return mlir::success();
}
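// Collects, in tile order, the per-logical-device outputs of
// `new_parallel_execute` that make up one tile-sharded cluster_func output,
// mapping each core to its region output via LookupClusterToCoreIndex. Under
// partial replication only the first replica of each tile is taken.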
mlir::LogicalResult GetTileShardedOutputsToMerge(
const mlir::Location& location, const int cluster_func_output_index,
llvm::ArrayRef<xla::OpSharding> output_sharding_config,
llvm::SmallVector<llvm::SmallVector<int, 4>, 4> cluster_to_core_index,
int cluster_idx, mlir::tf_device::ParallelExecuteOp new_parallel_execute,
llvm::SmallVector<mlir::Value, 4>* outputs_to_merge) {
const xla::OpSharding& sharding =
output_sharding_config[cluster_func_output_index];
outputs_to_merge->reserve(sharding.tile_assignment_devices_size());
for (const auto& core_id_and_index :
llvm::enumerate(sharding.tile_assignment_devices())) {
auto core_id = core_id_and_index.value();
auto tile_index = core_id_and_index.index();
int last_tile_dim_size = *sharding.tile_assignment_dimensions().rbegin();
if (sharding.replicate_on_last_tile_dim() &&
tile_index % last_tile_dim_size != 0) {
continue;
}
int region_output_index;
auto status = LookupClusterToCoreIndex(location, cluster_to_core_index,
core_id, cluster_func_output_index,
®ion_output_index);
if (failed(status)) return mlir::failure();
const auto output_from_logical_device =
new_parallel_execute.GetRegionOutputs(cluster_idx +
core_id)[region_output_index];
outputs_to_merge->emplace_back(output_from_logical_device);
}
return mlir::success();
}
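// Merges the tiles of one tile-sharded output with a single tf.XlaConcatND
// op, which also strips the padding implied by uneven splits. Requires a
// ranked output type.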
mlir::LogicalResult HandleTileShardedOutputsUsingXlaConcatOps(
const int cluster_func_output_index,
llvm::ArrayRef<xla::OpSharding> output_sharding_config,
llvm::SmallVector<llvm::SmallVector<int, 4>, 4> cluster_to_core_index,
const mlir::Location& location, mlir::Value cluster_func_output,
int cluster_idx, mlir::tf_device::ParallelExecuteOp new_parallel_execute,
mlir::OpBuilder& builder) {
builder.setInsertionPointAfter(new_parallel_execute);
const xla::OpSharding& sharding =
output_sharding_config[cluster_func_output_index];
const std::vector<int64_t> num_concats(
sharding.tile_assignment_dimensions().begin(),
sharding.replicate_on_last_tile_dim()
? std::prev(sharding.tile_assignment_dimensions().end())
: sharding.tile_assignment_dimensions().end());
const int rank = sharding.replicate_on_last_tile_dim()
? sharding.tile_assignment_dimensions_size() - 1
: sharding.tile_assignment_dimensions_size();
std::vector<int64_t> paddings;
paddings.reserve(rank);
auto output_type =
mlir::cast<mlir::TensorType>(cluster_func_output.getType());
if (output_type.hasRank()) {
auto shape = llvm::to_vector<4>(output_type.getShape());
for (int dim = 0; dim < rank; ++dim) {
paddings.push_back(GetPadding(dim,
sharding.tile_assignment_dimensions(dim),
PartialTensorShape(shape)));
}
} else {
mlir::emitError(
location, "XLA concat/split ops are supported only for Ranked tensor.");
return mlir::failure();
}
llvm::SmallVector<mlir::Value, 4> outputs_to_merge;
auto status = GetTileShardedOutputsToMerge(
location, cluster_func_output_index, output_sharding_config,
cluster_to_core_index, cluster_idx, new_parallel_execute,
&outputs_to_merge);
if (failed(status)) return mlir::failure();
mlir::TF::XlaConcatNDOp concat_op = CreateXlaConcatNDOp(
location, outputs_to_merge, num_concats, paddings, builder);
cluster_func_output.replaceAllUsesWith(concat_op.getResult());
return mlir::success();
}
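// Merges the tiles of one tile-sharded output with a tree of tf.Concat ops,
// walking the sharded dimensions from last to first. If any dimension was
// padded for the split, a trailing tf.Slice removes the padding.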
mlir::LogicalResult HandleTileShardedOutputsUsingTfConcatOps(
const int cluster_func_output_index,
llvm::ArrayRef<xla::OpSharding> output_sharding_config,
llvm::SmallVector<llvm::SmallVector<int, 4>, 4> cluster_to_core_index,
const mlir::Location& location, mlir::Value cluster_func_output,
int cluster_idx, mlir::tf_device::ParallelExecuteOp new_parallel_execute,
mlir::OpBuilder* builder) {
builder->setInsertionPointAfter(new_parallel_execute);
llvm::SmallVector<mlir::Value, 4> outputs_to_merge;
auto status = GetTileShardedOutputsToMerge(
location, cluster_func_output_index, output_sharding_config,
cluster_to_core_index, cluster_idx, new_parallel_execute,
&outputs_to_merge);
if (failed(status)) return mlir::failure();
const xla::OpSharding& sharding =
output_sharding_config[cluster_func_output_index];
auto dimension_to_splits_map =
GetDimensionIndicesAndNumSplitsFromSharding(sharding);
if (!dimension_to_splits_map.ok()) {
LOG(ERROR) << dimension_to_splits_map.status();
return mlir::failure();
}
auto output_type =
mlir::cast<mlir::TensorType>(cluster_func_output.getType());
PartialTensorShape shape;
bool output_shape_known = IsShapeKnown(output_type);
if (output_shape_known) {
shape = PartialTensorShape(output_type.getShape());
}
bool has_paddings = false;
std::vector<int64_t> paddings;
for (auto it = dimension_to_splits_map->rbegin();
it != dimension_to_splits_map->rend(); ++it) {
int concat_dimension = it->first;
int num_splits = it->second;
llvm::SmallVector<mlir::Value, 4> new_outputs;
new_outputs.reserve(num_splits);
for (int i = 0, end = outputs_to_merge.size(); i < end;
i = i + num_splits) {
int64_t padding;
if (output_shape_known) {
padding = GetPadding(concat_dimension, num_splits, shape);
} else {
padding = 0;
}
mlir::TF::ConcatOp concat_op =
CreateConcatOp(concat_dimension, location, padding,
llvm::ArrayRef<mlir::Value>{
outputs_to_merge.begin() + i,
outputs_to_merge.begin() + i + num_splits},
builder);
paddings.push_back(padding);
has_paddings |= padding > 0;
new_outputs.emplace_back(concat_op.getResult());
}
std::swap(new_outputs, outputs_to_merge);
}
assert(outputs_to_merge.size() == 1);
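  // When any dimension was padded before splitting, slice the merged result
  // back down to the original shape. Once the sliced value has replaced all
  // uses, the unconditional replaceAllUsesWith below finds no remaining uses
  // and is a no-op.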
if (has_paddings) {
mlir::TF::SliceOp slice_op =
CreateSliceOp(builder, location, outputs_to_merge[0], shape);
cluster_func_output.replaceAllUsesWith(slice_op.getResult());
}
cluster_func_output.replaceAllUsesWith(outputs_to_merge[0]);
return mlir::success();
}
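// Computes the per-core output type for a tile-sharded result by dividing
// each sharded dimension by its split count (rounding up for uneven splits).
// On the first dynamic sharded dimension the loop stops, leaving that and
// any later sharded dimensions at their original extents.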
mlir::LogicalResult ValidateAndGetTiledExecuteOutputShape(
const mlir::Location& location,
const mlir::TensorType cluster_func_output_type,
const xla::OpSharding& output_sharding, bool use_xla_nd_ops,
mlir::Type* tiled_logical_computation_type) {
const auto output_shape = cluster_func_output_type.getShape();
auto new_output_shape = llvm::to_vector<4>(output_shape);
auto dimension_to_splits_map =
GetDimensionIndicesAndNumSplitsFromSharding(output_sharding);
if (!dimension_to_splits_map.ok()) {
LOG(ERROR) << dimension_to_splits_map.status();
return mlir::failure();
}
for (const auto& dimension_and_output_splits : *dimension_to_splits_map) {
const auto dimension = dimension_and_output_splits.first;
const auto output_splits = dimension_and_output_splits.second;
if (output_shape[dimension] == mlir::ShapedType::kDynamic) {
*tiled_logical_computation_type = cluster_func_output_type;
break;
}
if (output_shape[dimension] % output_splits == 0) {
new_output_shape[dimension] = output_shape[dimension] / output_splits;
} else {
new_output_shape[dimension] =
(output_shape[dimension] / output_splits) + 1;
}
}
*tiled_logical_computation_type = mlir::RankedTensorType::get(
new_output_shape, cluster_func_output_type.getElementType());
return mlir::success();
}
}
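// Returns true iff every split-sharded input and output of `cluster_func`
// has a fully static shape.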
bool AreInputOutputShapesStaticallyKnownForSplitSharding(
llvm::ArrayRef<xla::OpSharding> output_sharding_config,
mlir::tf_device::ClusterFuncOp cluster_func) {
bool sharded_input_output_shape_statically_known = true;
llvm::SmallVector<mlir::Value, 4> cluster_func_inputs(
cluster_func.getOperands());
auto sharding_attrs =
cluster_func.getOperation()->getAttrOfType<mlir::ArrayAttr>(
kInputShardingAttr);
if (sharding_attrs) {
for (const auto& sharding_attr_and_index :
llvm::enumerate(sharding_attrs)) {
const auto& sharding_attr = sharding_attr_and_index.value();
const auto input_index = sharding_attr_and_index.index();
const auto& input_value = cluster_func_inputs[input_index];
const auto input_type =
mlir::cast<mlir::TensorType>(input_value.getType());
xla::OpSharding input_sharding;
if (DecodeShardingAttribute(
mlir::cast<mlir::StringAttr>(sharding_attr).getValue().str(),
input_sharding)
.failed()) {
sharded_input_output_shape_statically_known = false;
}
if (IsSplitSharding(input_sharding)) {
sharded_input_output_shape_statically_known &= IsShapeKnown(input_type);
}
}
}
for (const auto& result_and_index :
llvm::enumerate(cluster_func.getResults())) {
const auto output_index = result_and_index.index();
const auto& output_sharding = output_sharding_config[output_index];
const auto cluster_func_output_type =
mlir::cast<mlir::TensorType>(result_and_index.value().getType());
if (IsSplitSharding(output_sharding)) {
sharded_input_output_shape_statically_known &=
IsShapeKnown(cluster_func_output_type);
}
}
return sharded_input_output_shape_statically_known;
}
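// Convenience overload that computes per-core output types assuming tf.Split
// based lowering (use_xla_nd_ops = false).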
mlir::LogicalResult GetOutputTypesForLogicalDeviceComputation(
const int core_id, llvm::ArrayRef<xla::OpSharding> output_sharding_config,
mlir::tf_device::ClusterFuncOp cluster_func,
llvm::SmallVectorImpl<mlir::Type>* output_types,
llvm::SmallVectorImpl<int>* cluster_to_core_index) {
  return GetOutputTypesForLogicalDeviceComputation(
      core_id, output_sharding_config, cluster_func, output_types,
      /*use_xla_nd_ops=*/false, cluster_to_core_index);
}
mlir::LogicalResult GetOutputTypesForLogicalDeviceComputation(
const int core_id, llvm::ArrayRef<xla::OpSharding> output_sharding_config,
mlir::tf_device::ClusterFuncOp cluster_func,
llvm::SmallVectorImpl<mlir::Type>* output_types, bool use_xla_nd_ops,
llvm::SmallVectorImpl<int>* cluster_to_core_index) {
output_types->reserve(cluster_func.getNumResults());
int core_index = 0;
for (const auto& result_and_index :
llvm::enumerate(cluster_func.getResults())) {
const auto output_index = result_and_index.index();
const auto& output_sharding = output_sharding_config[output_index];
const auto cluster_func_output_type =
mlir::cast<mlir::TensorType>(result_and_index.value().getType());
if (IsSplitSharding(output_sharding)) {
mlir::Type tiled_logical_computation_type;
if (cluster_func_output_type.hasRank()) {
auto result = ValidateAndGetTiledExecuteOutputShape(
cluster_func.getLoc(), cluster_func_output_type, output_sharding,
use_xla_nd_ops, &tiled_logical_computation_type);
if (mlir::failed(result)) return mlir::failure();
} else {
tiled_logical_computation_type = cluster_func_output_type;
}
cluster_to_core_index->emplace_back(core_index++);
output_types->emplace_back(tiled_logical_computation_type);
} else if (IsReplicatedSharding(output_sharding) ||
IsAssignedToLogicalDevice(core_id, output_sharding)) {
cluster_to_core_index->emplace_back(core_index++);
output_types->emplace_back(cluster_func_output_type);
} else {
cluster_to_core_index->emplace_back(-1);
}
}
return mlir::success();
}
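// Convenience overload that remaps outputs assuming tf.Concat based merging
// (use_xla_nd_ops = false).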
mlir::LogicalResult RemapOutputsFromLogicalDevices(
const mlir::Location& location,
llvm::ArrayRef<xla::OpSharding> output_sharding_config,
llvm::SmallVector<llvm::SmallVector<int, 4>, 4> cluster_to_core_index,
int num_results_pre_cluster,
mlir::tf_device::ParallelExecuteOp old_parallel_execute, int cluster_idx,
mlir::tf_device::ParallelExecuteOp new_parallel_execute,
mlir::OpBuilder* builder) {
  return RemapOutputsFromLogicalDevices(
      location, output_sharding_config, cluster_to_core_index,
      num_results_pre_cluster, old_parallel_execute, cluster_idx,
      new_parallel_execute, /*use_xla_nd_ops=*/false, builder);
}
mlir::LogicalResult RemapOutputsFromLogicalDevices(
const mlir::Location& location,
llvm::ArrayRef<xla::OpSharding> output_sharding_config,
llvm::SmallVector<llvm::SmallVector<int, 4>, 4> cluster_to_core_index,
int num_results_pre_cluster,
mlir::tf_device::ParallelExecuteOp old_parallel_execute, int cluster_idx,
mlir::tf_device::ParallelExecuteOp new_parallel_execute,
bool use_xla_nd_ops, mlir::OpBuilder* builder) {
for (auto [output_index, old_parallel_execute_output] :
llvm::enumerate(old_parallel_execute.getResults())) {
if (output_index < num_results_pre_cluster) {
for (auto& use : llvm::make_early_inc_range(
old_parallel_execute->getResult(output_index).getUses())) {
use.set(new_parallel_execute->getResult(output_index));
}
continue;
}
int tpu_cluster_output_index = output_index - num_results_pre_cluster;
const auto& output_sharding =
output_sharding_config[tpu_cluster_output_index];
const auto output_sharding_type = output_sharding.type();
mlir::TF::TPUPartitionedOutputV2Op partitioned_output;
for (auto user : old_parallel_execute_output.getUsers()) {
if (auto partitioned_output_user =
llvm::dyn_cast_or_null<mlir::TF::TPUPartitionedOutputV2Op>(
user)) {
partitioned_output = partitioned_output_user;
break;
}
}
if (partitioned_output) {
if (!old_parallel_execute_output.hasOneUse())
return partitioned_output.emitOpError()
<< "must be a unique user of TPU Cluster "
"(tf_device.old_parallel_execute) output "
<< *old_parallel_execute_output.getOwner();
if (UnsupportedPartitionedShardingType(output_sharding_type))
return old_parallel_execute.emitOpError()
<< "unsupported output sharding type "
<< OpSharding_Type_Name(output_sharding_type) << " for "
<< output_index << "-th output";
if (output_sharding_type == xla::OpSharding::REPLICATED) {
for (const auto& index_and_output :
llvm::enumerate(partitioned_output.getOutput())) {
auto idx = (cluster_idx + index_and_output.index()) %
new_parallel_execute->getNumRegions();
const auto output_from_logical_device =
new_parallel_execute.GetRegionOutputs(
idx)[tpu_cluster_output_index];
index_and_output.value().replaceAllUsesWith(
output_from_logical_device);
}
} else {
assert(output_sharding_type == xla::OpSharding::OTHER);
llvm::SmallVector<mlir::Value, 4> tile_sharded_outputs;
if (failed(GetTileShardedOutputsToMerge(
location, tpu_cluster_output_index, output_sharding_config,
cluster_to_core_index, cluster_idx, new_parallel_execute,
&tile_sharded_outputs)))
return mlir::failure();
for (auto result :
llvm::zip(partitioned_output.getOutput(), tile_sharded_outputs))
std::get<0>(result).replaceAllUsesWith(std::get<1>(result));
}
continue;
}
if (IsSplitSharding(output_sharding)) {
if (use_xla_nd_ops) {
auto result = HandleTileShardedOutputsUsingXlaConcatOps(
tpu_cluster_output_index, output_sharding_config,
cluster_to_core_index, location, old_parallel_execute_output,
cluster_idx, new_parallel_execute, *builder);
if (mlir::failed(result)) return mlir::failure();
} else {
auto result = HandleTileShardedOutputsUsingTfConcatOps(
tpu_cluster_output_index, output_sharding_config,
cluster_to_core_index, location, old_parallel_execute_output,
cluster_idx, new_parallel_execute, builder);
if (failed(result)) return mlir::failure();
}
continue;
}
int logical_device_id = 0;
if (output_sharding_type == xla::OpSharding::MAXIMAL)
logical_device_id = output_sharding.tile_assignment_devices(0);
int region_output_index;
if (failed(LookupClusterToCoreIndex(
location, cluster_to_core_index, logical_device_id,
tpu_cluster_output_index, ®ion_output_index)))
return mlir::failure();
const auto output_from_logical_device =
new_parallel_execute.GetRegionOutputs(
cluster_idx + logical_device_id)[region_output_index];
old_parallel_execute_output.replaceAllUsesWith(output_from_logical_device);
}
return mlir::success();
}
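// Builds, for each core, the list of compile-metadata argument indices that
// core consumes: every index for a single-core replica, the tile-assigned
// indices for OTHER sharding, all cores for REPLICATED, and the single
// assigned core for MAXIMAL.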
llvm::SmallVector<llvm::SmallVector<int64_t, 4>, 4> GetMetadataArgumentMapping(
const tpu::TPUCompileMetadataProto& metadata) {
llvm::SmallVector<llvm::SmallVector<int64_t, 4>, 4> input_mappings(
metadata.num_cores_per_replica(), llvm::SmallVector<int64_t, 4>());
if (metadata.num_cores_per_replica() == 1) {
input_mappings.front().resize(metadata.args_size());
std::iota(input_mappings.front().begin(), input_mappings.front().end(), 0);
return input_mappings;
}
for (const auto& arg_and_idx : llvm::enumerate(metadata.args())) {
const auto& sharding = arg_and_idx.value().sharding();
const int64_t idx = arg_and_idx.index();
const auto sharding_type = sharding.type();
if (sharding_type == xla::OpSharding::OTHER) {
for (const auto& device : sharding.tile_assignment_devices()) {
CHECK(device >= 0 && device < input_mappings.size());
input_mappings[device].push_back(idx);
}
} else if (sharding_type == xla::OpSharding::REPLICATED) {
for (auto& input : input_mappings) input.push_back(idx);
} else {
assert(sharding_type == xla::OpSharding::MAXIMAL);
CHECK(sharding.tile_assignment_devices(0) >= 0 &&
sharding.tile_assignment_devices(0) < input_mappings.size());
input_mappings[sharding.tile_assignment_devices(0)].push_back(idx);
}
}
return input_mappings;
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h"
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/LogicalResult.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/tpu/topology.pb.h"
#include "tsl/platform/statusor.h"
namespace {
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> GetMlirModuleFromString(
llvm::StringRef string, mlir::MLIRContext* context) {
mlir::DialectRegistry mlir_registry;
RegisterAllTensorFlowDialects(mlir_registry);
context->appendDialectRegistry(mlir_registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module;
auto status =
tensorflow::DeserializeMlirModule(string, context, &mlir_module);
if (!status.ok()) {
return status;
}
return mlir_module;
}
TEST(XLAShardingUtilTest, TestShapesCheckForSplitSharding) {
static const char* const module_str =
R"(
module attributes {tf.versions = {producer = 888 : i32}, tf.devices = ["/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:TPU:0", "/job:localhost/replica:0/task:0/device:TPU:1", "/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0", "/job:localhost/replica:0/task:1/device:CPU:0", "/job:localhost/replica:0/task:1/device:TPU:0", "/job:localhost/replica:0/task:1/device:TPU:1", "/job:localhost/replica:0/task:1/device:TPU_SYSTEM:0"]} {
func.func @parallel_execute_with_tiled_input(%arg0: tensor<128x9xf32>, %arg1: tensor<128x9xf32>, %arg2: tensor<128x10xi32>, %arg3: tensor<128x10xi32>) -> (tensor<128x10xi32>, tensor<10x5xi1>) {
%0:2, %1:2 = tf_device.replicate([%arg0, %arg1] as %ri_1: tensor<128x9xf32>, [%arg2, %arg3] as %ri_2: tensor<128x10xi32>) {n = 2 : i32} {
%1 = "tf_device.launch"() <{device = "TPU_REPLICATED_HOST_0"}> ({
%identity = "tf.Identity"(%ri_1) {ici_weight_distribution_mlir_bridge_marker = true} : (tensor<128x9xf32>) -> tensor<128x9xf32>
tf_device.return %identity : tensor<128x9xf32>
}) {ici_weight_distribution_mlir_bridge_marker = true} : () -> tensor<128x9xf32>
%2, %3 = "tf_device.cluster_func"(%1, %ri_2) {_xla_compile_device_type = "TPU", _replication_info = "cluster0", func = @tpu0_func, num_cores_per_replica = 2, step_marker_location = "STEP_MARK_AT_TOP_LEVEL_WHILE_LOOP", topology = "\0A\04\01\02\01\02\10\02\18\02\22\10\00\00\00\00\00\00\00\01\00\01\00\00\00\01\00\01", device_assignment = [0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0], input_sharding_configuration = ["\08\03\1A\02\01\02\22\02\00\01", "\08\01\1A\01\01\22\01\01"], output_sharding_configuration = ["\08\01\1A\01\01\22\01\00", ""], use_spmd_for_xla_partitioning = false} : (tensor<128x9xf32>, tensor<128x10xi32>) -> (tensor<128x10xi32>, tensor<10x5xi1>)
tf_device.return %2, %3 : tensor<128x10xi32>, tensor<10x5xi1>
}
func.return %0#0, %1#0 : tensor<128x10xi32>, tensor<10x5xi1>
}
func.func @tpu0_func(%arg0: tensor<128x9xf32>, %arg1: tensor<128x10xi32>) -> (tensor<128x10xi32>, tensor<10x5xi1>) {
%1, %2 = "tf.A"(%arg0) : (tensor<128x9xf32>) -> (tensor<128x10xi32>, tensor<10x5xi1>)
%4 = "tf.B"(%1, %arg1) : (tensor<128x10xi32>, tensor<128x10xi32>) -> (tensor<128x10xi32>)
%3 = "tf.XlaSharding"(%2) { _XlaSharding = "", sharding = "" } : (tensor<10x5xi1>) -> tensor<10x5xi1>
func.return %4, %3 : tensor<128x10xi32>, tensor<10x5xi1>
}
}
)";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
llvm::SmallVector<mlir::tf_device::ClusterFuncOp, 4> cluster_func_ops;
module->walk([&](mlir::tf_device::ClusterFuncOp cluster_func) {
cluster_func_ops.push_back(cluster_func);
});
EXPECT_EQ(cluster_func_ops.size(), 1);
auto cluster_func_op = cluster_func_ops[0];
llvm::SmallVector<xla::OpSharding, 4> output_shardings;
auto result = tensorflow::ParseAndValidateOutputSharding(2, cluster_func_op,
&output_shardings);
ASSERT_TRUE(succeeded(result));
ASSERT_TRUE(tensorflow::AreInputOutputShapesStaticallyKnownForSplitSharding(
output_shardings, cluster_func_op));
}
TEST(XLAShardingUtilTest, TestShapesCheckForSplitShardingWithUnknownShapes) {
static const char* const module_str =
R"(
module attributes {tf.versions = {producer = 888 : i32}, tf.devices = ["/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:TPU:0", "/job:localhost/replica:0/task:0/device:TPU:1", "/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0", "/job:localhost/replica:0/task:1/device:CPU:0", "/job:localhost/replica:0/task:1/device:TPU:0", "/job:localhost/replica:0/task:1/device:TPU:1", "/job:localhost/replica:0/task:1/device:TPU_SYSTEM:0"]} {
func.func @parallel_execute_with_tiled_input(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>, %arg2: tensor<128x10xi32>, %arg3: tensor<128x10xi32>) -> (tensor<128x10xi32>, tensor<10x5xi1>) {
%0:2, %1:2 = tf_device.replicate([%arg0, %arg1] as %ri_1: tensor<*xf32>, [%arg2, %arg3] as %ri_2: tensor<128x10xi32>) {n = 2 : i32} {
%1 = "tf_device.launch"() <{device = "TPU_REPLICATED_HOST_0"}> ({
%identity = "tf.Identity"(%ri_1) {ici_weight_distribution_mlir_bridge_marker = true} : (tensor<*xf32>) -> tensor<*xf32>
tf_device.return %identity : tensor<*xf32>
}) {ici_weight_distribution_mlir_bridge_marker = true} : () -> tensor<*xf32>
%2, %3 = "tf_device.cluster_func"(%1, %ri_2) {_xla_compile_device_type = "TPU", _replication_info = "cluster0", func = @tpu0_func, num_cores_per_replica = 2, step_marker_location = "STEP_MARK_AT_TOP_LEVEL_WHILE_LOOP", topology = "\0A\04\01\02\01\02\10\02\18\02\22\10\00\00\00\00\00\00\00\01\00\01\00\00\00\01\00\01", device_assignment = [0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0], input_sharding_configuration = ["\08\03\1A\02\01\02\22\02\00\01", "\08\01\1A\01\01\22\01\01"], output_sharding_configuration = ["\08\01\1A\01\01\22\01\00", ""], use_spmd_for_xla_partitioning = false} : (tensor<*xf32>, tensor<128x10xi32>) -> (tensor<128x10xi32>, tensor<10x5xi1>)
tf_device.return %2, %3 : tensor<128x10xi32>, tensor<10x5xi1>
}
func.return %0#0, %1#0 : tensor<128x10xi32>, tensor<10x5xi1>
}
func.func @tpu0_func(%arg0: tensor<*xf32>, %arg1: tensor<128x10xi32>) -> (tensor<128x10xi32>, tensor<10x5xi1>) {
%1, %2 = "tf.A"(%arg0) : (tensor<*xf32>) -> (tensor<128x10xi32>, tensor<10x5xi1>)
%4 = "tf.B"(%1, %arg1) : (tensor<128x10xi32>, tensor<128x10xi32>) -> (tensor<128x10xi32>)
%3 = "tf.XlaSharding"(%2) { _XlaSharding = "", sharding = "" } : (tensor<10x5xi1>) -> tensor<10x5xi1>
func.return %4, %3 : tensor<128x10xi32>, tensor<10x5xi1>
}
}
)";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
llvm::SmallVector<mlir::tf_device::ClusterFuncOp, 4> cluster_func_ops;
module->walk([&](mlir::tf_device::ClusterFuncOp cluster_func) {
cluster_func_ops.push_back(cluster_func);
});
EXPECT_EQ(cluster_func_ops.size(), 1);
auto cluster_func_op = cluster_func_ops[0];
llvm::SmallVector<xla::OpSharding, 4> output_shardings;
auto result = tensorflow::ParseAndValidateOutputSharding(2, cluster_func_op,
&output_shardings);
ASSERT_TRUE(succeeded(result));
ASSERT_FALSE(tensorflow::AreInputOutputShapesStaticallyKnownForSplitSharding(
output_shardings, cluster_func_op));
}
TEST(XLAShardingUtilTest, NotDivisibleShardingSplitOpTest) {
static const char* const module_str =
R"(
module attributes {tf.versions = {producer = 888 : i32}, tf.devices = ["/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:TPU:0", "/job:localhost/replica:0/task:0/device:TPU:1", "/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0", "/job:localhost/replica:0/task:1/device:CPU:0", "/job:localhost/replica:0/task:1/device:TPU:0", "/job:localhost/replica:0/task:1/device:TPU:1", "/job:localhost/replica:0/task:1/device:TPU_SYSTEM:0"]} {
func.func @uneven_input_sharding_disallowed(%arg0: tensor<128x10xf32>, %arg1: tensor<128x10xf32>, %arg2: tensor<*xi32>, %arg3: tensor<*xi32>) -> (tensor<*xi32>, tensor<*xi1>) {
%0:2, %1:2 = tf_device.replicate([%arg0, %arg1] as %ri_1: tensor<128x10xf32>, [%arg2, %arg3] as %ri_2: tensor<*xi32>) {n = 2 : i32} {
%1, %2 = "tf_device.cluster_func"(%ri_1, %ri_2) {_xla_compile_device_type = "TPU", _replication_info = "cluster0", func = @tpu0_func, num_cores_per_replica = 2, step_marker_location = "STEP_MARK_AT_TOP_LEVEL_WHILE_LOOP", topology = "\0A\04\01\02\01\02\10\02\18\02\22\10\00\00\00\00\00\00\00\01\00\01\00\00\00\01\00\01", device_assignment = [0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0], input_sharding_configuration = ["\08\03\12\12\10\0b\1a\02\01\04\2a\06\0a\02\01\00\20\01\32\02\00\00\1a\02\01\04\22\04\00\01\02\03", "\08\01\1A\01\01\22\01\01"], output_sharding_configuration = ["\08\01\1A\01\01\22\01\00", ""], use_spmd_for_xla_partitioning = false} : (tensor<128x10xf32>, tensor<*xi32>) -> (tensor<*xi32>, tensor<*xi1>)
tf_device.return %1, %2 : tensor<*xi32>, tensor<*xi1>
}
func.return %0#0, %1#0 : tensor<*xi32>, tensor<*xi1>
}
func.func @tpu0_func(%arg0: tensor<128x10xf32>, %arg1: tensor<*xi32>) -> (tensor<*xi32>, tensor<*xi1>) {
%1, %2 = "tf.A"(%arg0) : (tensor<128x10xf32>) -> (tensor<*xi32>, tensor<*xi1>)
%4 = "tf.B"(%1, %arg1) : (tensor<*xi32>, tensor<*xi32>) -> (tensor<*xi32>)
%3 = "tf.XlaSharding"(%2) { _XlaSharding = "", sharding = "" } : (tensor<*xi1>) -> tensor<*xi1>
func.return %4, %3 : tensor<*xi32>, tensor<*xi1>
}
}
)";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
llvm::SmallVector<mlir::tf_device::ClusterFuncOp, 4> cluster_func_ops;
module->walk([&](mlir::tf_device::ClusterFuncOp cluster_func) {
cluster_func_ops.push_back(cluster_func);
});
EXPECT_EQ(cluster_func_ops.size(), 1);
auto& cluster_func_op = cluster_func_ops[0];
int num_cores_per_replica = 4;
mlir::OpBuilder builder(&context);
bool use_xla_nd_ops = true;
llvm::SmallVector<llvm::SmallVector<mlir::Value, 4>, 4> input_list;
auto result = tensorflow::ExtractInputsForLogicalDevices(
num_cores_per_replica, cluster_func_op, &builder, use_xla_nd_ops,
&input_list);
ASSERT_TRUE(succeeded(result));
ASSERT_EQ(input_list.size(), num_cores_per_replica);
ASSERT_GT(input_list.front().size(), 0);
auto* op = input_list.front().front().getDefiningOp();
ASSERT_TRUE(mlir::isa<mlir::TF::XlaSplitNDOp>(op));
op->destroy();
input_list.clear();
result = tensorflow::ExtractInputsForLogicalDevices(
num_cores_per_replica, cluster_func_op, &builder, false, &input_list);
ASSERT_TRUE(succeeded(result));
auto* split_op = input_list.front().front().getDefiningOp();
ASSERT_TRUE(mlir::isa<mlir::TF::SplitOp>(split_op));
llvm::SmallVector<mlir::Value, 4> split_inputs(split_op->getOperands());
auto* const_op = split_inputs[0].getDefiningOp();
ASSERT_TRUE(mlir::isa<mlir::TF::ConstOp>(const_op));
auto* pad_op = split_inputs[1].getDefiningOp();
ASSERT_TRUE(mlir::isa<mlir::TF::PadOp>(pad_op));
llvm::SmallVector<mlir::Value, 4> pad_inputs(pad_op->getOperands());
auto* const_pad_value = pad_inputs[1].getDefiningOp();
ASSERT_TRUE(mlir::isa<mlir::TF::ConstOp>(const_pad_value));
split_op->destroy();
const_op->destroy();
pad_op->destroy();
const_pad_value->destroy();
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d3090d1e-5324-455c-99cb-9831f60b4f72 | cpp | tensorflow/tensorflow | array4d | third_party/xla/xla/array4d.h | third_party/xla/xla/array4d_test.cc | #ifndef XLA_ARRAY4D_H_
#define XLA_ARRAY4D_H_
#include <algorithm>
#include <functional>
#include <initializer_list>
#include <iterator>
#include <memory>
#include <numeric>
#include <random>
#include <string>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/array2d.h"
#include "xla/types.h"
#include "tsl/platform/logging.h"
namespace xla {
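// Simple 4D array structure, similar in form to Array2D, used primarily in
// convolution testing. Dimension 0 is outermost; (planes, depth, height,
// width) map to (n1, n2, n3, n4) respectively.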
template <typename T>
class Array4D : public Array<T> {
public:
Array4D() : Array<T>(std::vector<int64_t>{0, 0, 0, 0}) {}
Array4D(int64_t planes, int64_t depth, int64_t height, int64_t width)
: Array<T>(std::vector<int64_t>{planes, depth, height, width}) {}
Array4D(int64_t planes, int64_t depth, int64_t height, int64_t width, T value)
: Array<T>(std::vector<int64_t>{planes, depth, height, width}, value) {}
template <typename Container = std::initializer_list<T>>
Array4D(int64_t planes, int64_t depth, int64_t height, int64_t width,
const Container& values)
: Array4D(planes, depth, height, width) {
this->SetValues(values);
}
Array4D(std::initializer_list<std::initializer_list<
std::initializer_list<std::initializer_list<T>>>>
values)
: Array<T>(values) {}
template <typename T2, array_impl::overload_for_float<T, T2> = true>
Array4D(std::initializer_list<std::initializer_list<
std::initializer_list<std::initializer_list<T2>>>>
values)
: Array<T>(values) {}
int64_t n4() const { return this->dim(3); }
int64_t n3() const { return this->dim(2); }
int64_t n2() const { return this->dim(1); }
int64_t n1() const { return this->dim(0); }
int64_t width() const { return this->dim(3); }
int64_t height() const { return this->dim(2); }
int64_t depth() const { return this->dim(1); }
int64_t planes() const { return this->dim(0); }
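  // Fills the array with the given 2D (height x width) pattern, replicated
  // across all planes and depths.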
void FillWithYX(const Array2D<T>& value) {
CHECK_EQ(value.height(), height());
CHECK_EQ(value.width(), width());
for (int64_t plane = 0; plane < planes(); ++plane) {
for (int64_t depth = 0; depth < this->depth(); ++depth) {
for (int64_t height = 0; height < this->height(); ++height) {
for (int64_t width = 0; width < this->width(); ++width) {
(*this)(plane, depth, height, width) = value(height, width);
}
}
}
}
}
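  // Fills the array with the given 2D (depth x height) pattern, replicated
  // across all planes and widths.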
void FillWithZY(const Array2D<T>& value) {
CHECK_EQ(value.height(), depth());
CHECK_EQ(value.width(), height());
for (int64_t plane = 0; plane < planes(); ++plane) {
for (int64_t depth = 0; depth < this->depth(); ++depth) {
for (int64_t height = 0; height < this->height(); ++height) {
for (int64_t width = 0; width < this->width(); ++width) {
(*this)(plane, depth, height, width) = value(depth, height);
}
}
}
}
}
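  // Fills the array with the given 2D (planes x depth) pattern, replicated
  // across all heights and widths.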
void FillWithPZ(const Array2D<T>& value) {
CHECK_EQ(value.height(), planes());
CHECK_EQ(value.width(), depth());
for (int64_t height = 0; height < this->height(); ++height) {
for (int64_t width = 0; width < this->width(); ++width) {
for (int64_t plane = 0; plane < planes(); ++plane) {
for (int64_t depth = 0; depth < this->depth(); ++depth) {
(*this)(plane, depth, height, width) = value(plane, depth);
}
}
}
}
}
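  // Fills each element at (plane, depth, h, w) with plane * depth() + depth,
  // replicated across height and width, after logging the dimensions.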
void FillWithMinorDimNum() {
LOG(INFO) << "width: " << this->width();
LOG(INFO) << "height: " << this->height();
LOG(INFO) << "depth: " << this->depth();
LOG(INFO) << "planes: " << this->planes();
for (int64_t height = 0; height < this->height(); ++height) {
for (int64_t width = 0; width < this->width(); ++width) {
for (int64_t plane = 0; plane < planes(); ++plane) {
for (int64_t depth = 0; depth < this->depth(); ++depth) {
float this_val = plane * this->depth() + depth;
(*this)(plane, depth, height, width) = this_val;
}
}
}
}
}
};
}
#endif | #include "xla/array4d.h"
#include <initializer_list>
#include <numeric>
#include <vector>
#include "absl/log/log.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "xla/array2d.h"
#include "xla/test.h"
namespace xla {
namespace {
template <typename T>
int64_t Array4DLinearIndex(const Array4D<T>& arr,
absl::Span<const int64_t> idx) {
EXPECT_EQ(4, idx.size());
return (idx[3] + idx[2] * arr.n4() + idx[1] * arr.n3() * arr.n4() +
idx[0] * arr.n2() * arr.n3() * arr.n4());
}
TEST(Array4dTest, UninitializedDimsCtor) {
Array4D<int> empty(2, 3, 4, 5);
EXPECT_EQ(empty.n1(), 2);
EXPECT_EQ(empty.n2(), 3);
EXPECT_EQ(empty.n3(), 4);
EXPECT_EQ(empty.n4(), 5);
EXPECT_EQ(empty.num_elements(), 120);
}
TEST(Array4dTest, FillCtor) {
Array4D<int> fullof7(2, 3, 4, 5, 7);
EXPECT_EQ(fullof7.n1(), 2);
EXPECT_EQ(fullof7.n2(), 3);
EXPECT_EQ(fullof7.n3(), 4);
EXPECT_EQ(fullof7.n4(), 5);
fullof7.Each(
[](absl::Span<const int64_t> idx, int* cell) { EXPECT_EQ(*cell, 7); });
}
TEST(Array4dTest, ContainerCtor) {
std::vector<int> filler(120);
std::iota(filler.begin(), filler.end(), 0);
Array4D<int> arr(2, 3, 4, 5, filler);
EXPECT_EQ(arr.n1(), 2);
EXPECT_EQ(arr.n2(), 3);
EXPECT_EQ(arr.n3(), 4);
EXPECT_EQ(arr.n4(), 5);
arr.Each([&arr](absl::Span<const int64_t> idx, int* cell) {
EXPECT_EQ(*cell, Array4DLinearIndex(arr, idx));
});
}
TEST(Array4dTest, InitializerListCtor) {
Array4D<int> arr = {{{{1}, {2}}, {{3}, {4}}, {{5}, {6}}, {{7}, {8}}},
{{{9}, {10}}, {{11}, {12}}, {{13}, {14}}, {{15}, {16}}},
{{{17}, {18}}, {{19}, {20}}, {{21}, {22}}, {{23}, {24}}}};
EXPECT_EQ(arr.n1(), 3);
EXPECT_EQ(arr.n2(), 4);
EXPECT_EQ(arr.n3(), 2);
EXPECT_EQ(arr.n4(), 1);
EXPECT_EQ(arr.num_elements(), 24);
EXPECT_EQ(arr(0, 0, 0, 0), 1);
EXPECT_EQ(arr(0, 0, 1, 0), 2);
EXPECT_EQ(arr(0, 1, 0, 0), 3);
EXPECT_EQ(arr(0, 3, 1, 0), 8);
EXPECT_EQ(arr(1, 0, 0, 0), 9);
EXPECT_EQ(arr(1, 1, 1, 0), 12);
EXPECT_EQ(arr(2, 0, 0, 0), 17);
EXPECT_EQ(arr(2, 1, 1, 0), 20);
EXPECT_EQ(arr(2, 2, 0, 0), 21);
EXPECT_EQ(arr(2, 3, 1, 0), 24);
}
TEST(Array4dTest, InitializerListCtorHalf) {
Array4D<Eigen::half> arr = {
{{{1.0f}, {2.0f}}, {{3.0f}, {4.0f}}, {{5.0f}, {6.0f}}, {{7.0f}, {8.0f}}},
{{{9.0f}, {10.0f}},
{{11.0f}, {12.0f}},
{{13.0f}, {14.0f}},
{{15.0f}, {16.0f}}},
{{{17.0f}, {18.0f}},
{{19.0f}, {20.0f}},
{{21.0f}, {22.0f}},
{{23.0f}, {24.0f}}}};
EXPECT_EQ(arr.n1(), 3);
EXPECT_EQ(arr.n2(), 4);
EXPECT_EQ(arr.n3(), 2);
EXPECT_EQ(arr.n4(), 1);
EXPECT_EQ(arr.num_elements(), 24);
EXPECT_EQ(arr(0, 0, 0, 0), static_cast<Eigen::half>(1));
EXPECT_EQ(arr(0, 0, 1, 0), static_cast<Eigen::half>(2));
EXPECT_EQ(arr(0, 1, 0, 0), static_cast<Eigen::half>(3));
EXPECT_EQ(arr(0, 3, 1, 0), static_cast<Eigen::half>(8));
EXPECT_EQ(arr(1, 0, 0, 0), static_cast<Eigen::half>(9));
EXPECT_EQ(arr(1, 1, 1, 0), static_cast<Eigen::half>(12));
EXPECT_EQ(arr(2, 0, 0, 0), static_cast<Eigen::half>(17));
EXPECT_EQ(arr(2, 1, 1, 0), static_cast<Eigen::half>(20));
EXPECT_EQ(arr(2, 2, 0, 0), static_cast<Eigen::half>(21));
EXPECT_EQ(arr(2, 3, 1, 0), static_cast<Eigen::half>(24));
}
TEST(Array4dTest, Fill) {
Array4D<int> fullof7(2, 3, 4, 5, 7);
fullof7.Each(
[](absl::Span<const int64_t> idx, int* cell) { EXPECT_EQ(*cell, 7); });
fullof7.Fill(11);
fullof7.Each(
[](absl::Span<const int64_t> idx, int* cell) { EXPECT_EQ(*cell, 11); });
}
TEST(Array4dTest, FillWithMultiples) {
Array4D<float> arr(2, 3, 4, 5);
arr.FillWithMultiples(2.0f);
arr.Each([&arr](absl::Span<const int64_t> idx, float* cell) {
EXPECT_EQ(*cell, 2.0f * Array4DLinearIndex(arr, idx));
});
}
TEST(Array4dTest, FillRasterDimensionDepthOne) {
Array4D<float> array(1, 1, 128, 128);
Array2D<float> raster(128, 128);
for (int row = 0; row < 128; ++row) {
for (int col = 0; col < 128; ++col) {
raster(row, col) = row * 1000.0 + col;
}
}
array.FillWithYX(raster);
VLOG(1) << array.ToString();
EXPECT_FLOAT_EQ(raster(0, 0), array(0, 0, 0, 0));
EXPECT_FLOAT_EQ(raster(0, 1), array(0, 0, 0, 1));
EXPECT_FLOAT_EQ(raster(1, 0), array(0, 0, 1, 0));
EXPECT_FLOAT_EQ(raster(1, 1), array(0, 0, 1, 1));
EXPECT_FLOAT_EQ(raster(2, 0), array(0, 0, 2, 0));
EXPECT_FLOAT_EQ(raster(127, 127), array(0, 0, 127, 127));
EXPECT_FLOAT_EQ(0, array(0, 0, 0, 0));
EXPECT_FLOAT_EQ(1, array(0, 0, 0, 1));
EXPECT_FLOAT_EQ(2, array(0, 0, 0, 2));
EXPECT_FLOAT_EQ(1001, array(0, 0, 1, 1));
EXPECT_FLOAT_EQ(2001, array(0, 0, 2, 1));
EXPECT_FLOAT_EQ(127000, array(0, 0, 127, 0));
EXPECT_FLOAT_EQ(127127, array(0, 0, 127, 127));
}
TEST(Array4dTest, FillWithPzTestDepthOne) {
Array2D<float> matrix(3, 2);
std::initializer_list<std::initializer_list<float>> values = {
{-3.f, -0.1f}, {0.f, -0.1f}, {3.f, 0.2f},
};
int rowno = 0;
for (auto row : values) {
int colno = 0;
for (float f : row) {
matrix(rowno, colno) = f;
colno++;
}
rowno++;
}
Array4D<float> actual(3, 2, 1, 1);
actual.FillWithPZ(matrix);
EXPECT_FLOAT_EQ(-3, actual(0, 0, 0, 0));
EXPECT_FLOAT_EQ(-0.1, actual(0, 1, 0, 0));
EXPECT_FLOAT_EQ(0, actual(1, 0, 0, 0));
EXPECT_FLOAT_EQ(-0.1, actual(1, 1, 0, 0));
EXPECT_FLOAT_EQ(3, actual(2, 0, 0, 0));
EXPECT_FLOAT_EQ(0.2, actual(2, 1, 0, 0));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/array4d.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/array4d_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
de005b97-03d9-41d3-b166-71fb677e9413 | cpp | tensorflow/tensorflow | ctc_ops | tensorflow/core/ops/ctc_ops.cc | tensorflow/core/ops/ctc_ops_test.cc | #include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
REGISTER_OP("CTCLoss")
.Input("inputs: T")
.Input("labels_indices: int64")
.Input("labels_values: int32")
.Input("sequence_length: int32")
.Attr("preprocess_collapse_repeated: bool = false")
.Attr("ctc_merge_repeated: bool = true")
.Attr("ignore_longer_outputs_than_inputs: bool = false")
.Output("loss: T")
.Output("gradient: T")
.Attr("T: {float, double} = DT_FLOAT")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle inputs;
ShapeHandle labels_indices;
ShapeHandle labels_values;
ShapeHandle sequence_length;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 3, &inputs));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &labels_indices));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &labels_values));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 1, &sequence_length));
DimensionHandle unused;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(labels_indices, 0),
c->Dim(labels_values, 0), &unused));
DimensionHandle batch_size;
TF_RETURN_IF_ERROR(
c->Merge(c->Dim(inputs, 1), c->Dim(sequence_length, 0), &batch_size));
TF_RETURN_IF_ERROR(c->ReplaceDim(inputs, 1, batch_size, &inputs));
c->set_output(0, c->Vector(batch_size));
c->set_output(1, inputs);
return absl::OkStatus();
});
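// CTCLossV2 shares CTCLoss's shape function but is registered only for
// float inputs and outputs.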
REGISTER_OP("CTCLossV2")
.Input("inputs: float")
.Input("labels_indices: int64")
.Input("labels_values: int32")
.Input("sequence_length: int32")
.Attr("preprocess_collapse_repeated: bool = false")
.Attr("ctc_merge_repeated: bool = true")
.Attr("ignore_longer_outputs_than_inputs: bool = false")
.Output("loss: float")
.Output("gradient: float")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle inputs;
ShapeHandle labels_indices;
ShapeHandle labels_values;
ShapeHandle sequence_length;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 3, &inputs));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &labels_indices));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &labels_values));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 1, &sequence_length));
DimensionHandle unused;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(labels_indices, 0),
c->Dim(labels_values, 0), &unused));
DimensionHandle batch_size;
TF_RETURN_IF_ERROR(
c->Merge(c->Dim(inputs, 1), c->Dim(sequence_length, 0), &batch_size));
TF_RETURN_IF_ERROR(c->ReplaceDim(inputs, 1, batch_size, &inputs));
c->set_output(0, c->Vector(batch_size));
c->set_output(1, inputs);
return absl::OkStatus();
});
REGISTER_OP("CTCGreedyDecoder")
.Input("inputs: T")
.Input("sequence_length: int32")
.Attr("merge_repeated: bool = false")
.Attr("blank_index: int = -1")
.Output("decoded_indices: int64")
.Output("decoded_values: int64")
.Output("decoded_shape: int64")
.Output("log_probability: T")
.Attr("T: {float, double} = DT_FLOAT")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle inputs;
ShapeHandle sequence_length;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 3, &inputs));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &sequence_length));
DimensionHandle batch_size;
TF_RETURN_IF_ERROR(
c->Merge(c->Dim(inputs, 1), c->Dim(sequence_length, 0), &batch_size));
DimensionHandle total_decoded_outputs = c->UnknownDim();
c->set_output(0, c->Matrix(total_decoded_outputs, 2));
c->set_output(1, c->Vector(total_decoded_outputs));
c->set_output(2, c->Vector(2));
c->set_output(3, c->Matrix(batch_size, 1));
return absl::OkStatus();
});
REGISTER_OP("CTCBeamSearchDecoder")
.Input("inputs: T")
.Input("sequence_length: int32")
.Attr("beam_width: int >= 1")
.Attr("top_paths: int >= 1")
.Attr("merge_repeated: bool = true")
.Output("decoded_indices: top_paths * int64")
.Output("decoded_values: top_paths * int64")
.Output("decoded_shape: top_paths * int64")
.Output("log_probability: T")
.Attr("T: {float, double} = DT_FLOAT")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle inputs;
ShapeHandle sequence_length;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 3, &inputs));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &sequence_length));
DimensionHandle batch_size;
TF_RETURN_IF_ERROR(
c->Merge(c->Dim(inputs, 1), c->Dim(sequence_length, 0), &batch_size));
int32_t top_paths;
TF_RETURN_IF_ERROR(c->GetAttr("top_paths", &top_paths));
int out_idx = 0;
for (int i = 0; i < top_paths; ++i) {
c->set_output(out_idx++, c->Matrix(InferenceContext::kUnknownDim, 2));
}
for (int i = 0; i < top_paths; ++i) {
c->set_output(out_idx++, c->Vector(InferenceContext::kUnknownDim));
}
ShapeHandle shape_v = c->Vector(2);
for (int i = 0; i < top_paths; ++i) {
c->set_output(out_idx++, shape_v);
}
c->set_output(out_idx++, c->Matrix(batch_size, top_paths));
return absl::OkStatus();
});
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(CtcOpsTest, CTCLoss_ShapeFn) {
ShapeInferenceTestOp op("CTCLoss");
INFER_ERROR("must be rank 3", op, "[];?;?;?");
INFER_ERROR("must be rank 2", op, "?;[];?;?");
INFER_ERROR("must be rank 1", op, "?;?;[];?");
INFER_ERROR("must be rank 1", op, "?;?;?;[]");
INFER_ERROR("must be equal", op, "?;[1,?];[2];?");
INFER_OK(op, "[?,?,?];?;?;[?]", "[d0_1|d3_0];[d0_0,d0_1|d3_0,d0_2]");
INFER_OK(op, "[?,1,?];?;?;[1]", "[d0_1|d3_0];[d0_0,d0_1|d3_0,d0_2]");
INFER_OK(op, "[?,?,?];?;?;[1]", "[d3_0];[d0_0,d3_0,d0_2]");
INFER_OK(op, "[?,1,?];?;?;[?]", "[d0_1];[d0_0,d0_1,d0_2]");
INFER_ERROR("must be equal", op, "[?,1,?];?;?;[2]");
}
TEST(CtcOpsTest, CTCGreedyDecoder_ShapeFn) {
ShapeInferenceTestOp op("CTCGreedyDecoder");
INFER_ERROR("must be rank 3", op, "[];?");
INFER_ERROR("must be rank 1", op, "?;[]");
INFER_OK(op, "[?,?,?];[?]", "[?,2];[?];[2];[d0_1|d1_0,1]");
INFER_OK(op, "[?,1,?];[1]", "[?,2];[?];[2];[d0_1|d1_0,1]");
INFER_OK(op, "[?,?,?];[1]", "[?,2];[?];[2];[d1_0,1]");
INFER_OK(op, "[?,1,?];[?]", "[?,2];[?];[2];[d0_1,1]");
INFER_ERROR("must be equal", op, "[?,1,?];[2]");
}
TEST(CtcOpsTest, CTCBeamSearchDecoder_ShapeFn) {
ShapeInferenceTestOp op("CTCBeamSearchDecoder");
auto set_top_paths = [&op](int top_paths) {
TF_ASSERT_OK(NodeDefBuilder("test", "CTCBeamSearchDecoder")
.Input({"a", 0, DT_FLOAT})
.Input({"b", 0, DT_INT32})
.Attr("top_paths", top_paths)
.Finalize(&op.node_def));
};
set_top_paths(1);
INFER_ERROR("must be rank 3", op, "[];?");
INFER_ERROR("must be rank 1", op, "?;[]");
INFER_OK(op, "[?,?,?];[?]", "[?,2];[?];[2];[d0_1|d1_0,1]");
INFER_OK(op, "[?,1,?];[1]", "[?,2];[?];[2];[d0_1|d1_0,1]");
INFER_OK(op, "[?,?,?];[1]", "[?,2];[?];[2];[d1_0,1]");
INFER_OK(op, "[?,1,?];[?]", "[?,2];[?];[2];[d0_1,1]");
INFER_ERROR("must be equal", op, "[?,1,?];[2]");
set_top_paths(2);
INFER_OK(op, "?;?", "[?,2];[?,2];[?];[?];[2];[2];[?,2]");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/ctc_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/ctc_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
34799c31-1af1-42ef-be2e-3fbff6036ffd | cpp | google/arolla | logic_ops | arolla/qexpr/operators/dense_array/logic_ops.h | arolla/qexpr/operators/dense_array/logic_ops_test.cc | #ifndef AROLLA_QEXPR_OPERATORS_DENSE_ARRAY_LOGIC_OPS_H_
#define AROLLA_QEXPR_OPERATORS_DENSE_ARRAY_LOGIC_OPS_H_
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/dense_array/bitmap.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/ops/dense_ops.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/buffer.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/util/unit.h"
#include "arolla/util/view_types.h"
namespace arolla {
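// Presence mask of a DenseArray: reuses the argument's bitmap without copying
// and replaces the values with a VoidBuffer of the same size.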
struct DenseArrayHasOp {
template <typename T>
DenseArray<Unit> operator()(const DenseArray<T>& arg) const {
return {VoidBuffer(arg.size()), arg.bitmap, arg.bitmap_bit_offset};
}
};
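// Keeps lhs values only where rhs is present. When either bitmap is empty
// (i.e. fully present) the other side's bitmap is reused directly; otherwise
// the two presence bitmaps are intersected into a freshly built one.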
struct DenseArrayPresenceAndOp {
template <typename T>
absl::StatusOr<DenseArray<T>> operator()(EvaluationContext* ctx,
const DenseArray<T>& lhs,
const DenseArray<Unit>& rhs) const {
if (ABSL_PREDICT_FALSE(lhs.size() != rhs.size())) {
return SizeMismatchError({lhs.size(), rhs.size()});
}
if (rhs.bitmap.empty()) {
return lhs;
} else if (lhs.bitmap.empty()) {
return DenseArray<T>{lhs.values, rhs.bitmap, rhs.bitmap_bit_offset};
} else {
int64_t bitmap_size = bitmap::BitmapSize(lhs.size());
bitmap::RawBuilder bldr(bitmap_size, &ctx->buffer_factory());
bitmap::Intersect(lhs.bitmap, rhs.bitmap, lhs.bitmap_bit_offset,
rhs.bitmap_bit_offset, bldr.GetMutableSpan());
return DenseArray<T>{
lhs.values, std::move(bldr).Build(),
std::min(lhs.bitmap_bit_offset, rhs.bitmap_bit_offset)};
}
}
};
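// Inverts a presence bitmap: the result is present exactly where the argument
// is missing. Leading all-zero bitmap words are handled with a single memset
// before the per-word negation loop.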
struct DenseArrayPresenceNotOp {
template <typename T>
DenseArray<Unit> operator()(EvaluationContext* ctx,
const DenseArray<T>& arg) const {
if (arg.bitmap.empty()) {
return CreateEmptyDenseArray<Unit>(arg.size(), &ctx->buffer_factory());
}
absl::Span<const bitmap::Word> bitmap_in = arg.bitmap.span();
int64_t first_not_zero_index = 0;
int64_t bitmap_size = arg.bitmap.size();
while (first_not_zero_index < bitmap_size &&
bitmap_in[first_not_zero_index] == 0) {
first_not_zero_index++;
}
if (first_not_zero_index == bitmap_size) {
return {VoidBuffer(arg.size())};
}
bitmap::RawBuilder bldr(bitmap_size, &ctx->buffer_factory());
absl::Span<bitmap::Word> bitmap_out = bldr.GetMutableSpan();
if (first_not_zero_index > 0) {
std::memset(bitmap_out.data(), 0xff,
sizeof(bitmap::Word) * first_not_zero_index);
}
for (int64_t i = first_not_zero_index; i < bitmap_size; ++i) {
bitmap_out[i] = ~bitmap_in[i];
}
return {VoidBuffer(arg.size()), std::move(bldr).Build(),
arg.bitmap_bit_offset};
}
};
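// Pointwise "first present wins": returns lhs where it is present, otherwise
// rhs. The second overload takes an optional scalar fallback and broadcasts
// it across the missing slots of the array.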
struct DenseArrayPresenceOrOp {
template <typename T>
absl::StatusOr<DenseArray<T>> operator()(EvaluationContext* ctx,
const DenseArray<T>& lhs,
const DenseArray<T>& rhs) const {
if (ABSL_PREDICT_FALSE(lhs.size() != rhs.size())) {
return SizeMismatchError({lhs.size(), rhs.size()});
}
if (lhs.bitmap.empty()) {
return lhs;
} else if (bitmap::AreAllBitsUnset(lhs.bitmap.begin(), lhs.size())) {
return rhs;
} else {
auto fn = [&](OptionalValue<view_type_t<T>> a,
OptionalValue<view_type_t<T>> b) {
return OptionalValue<view_type_t<T>>{a.present || b.present,
a.present ? a.value : b.value};
};
return CreateDenseOp<DenseOpFlags::kRunOnMissing |
DenseOpFlags::kNoBitmapOffset |
DenseOpFlags::kNoSizeValidation,
decltype(fn), T>(fn, &ctx->buffer_factory())(lhs,
rhs);
}
}
template <typename T>
DenseArray<T> operator()(EvaluationContext* ctx, const DenseArray<T>& lhs,
const OptionalValue<T>& rhs) const {
if (!rhs.present || lhs.bitmap.empty()) {
return lhs;
} else if (bitmap::AreAllBitsUnset(lhs.bitmap.begin(), lhs.size())) {
return CreateConstDenseArray<T>(lhs.size(), rhs.value,
&ctx->buffer_factory());
} else {
auto fn = [value = rhs.value](OptionalValue<view_type_t<T>> a) {
return a.present ? a.value : value;
};
return CreateDenseOp<DenseOpFlags::kRunOnMissing |
DenseOpFlags::kNoBitmapOffset |
DenseOpFlags::kNoSizeValidation,
decltype(fn), T>(fn, &ctx->buffer_factory())(lhs);
}
}
};
}
#endif | #include <optional>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status_matchers.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/operators.h"
#include "arolla/util/unit.h"
namespace arolla::testing {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
TEST(LogicOpsTest, DenseArrayPresenceAndOp) {
EXPECT_THAT(InvokeOperator<DenseArray<int>>(
"core.presence_and", CreateDenseArray<int>({1, 2, 3}),
CreateDenseArray<Unit>({kUnit, std::nullopt, kUnit})),
IsOkAndHolds(ElementsAre(1, std::nullopt, 3)));
EXPECT_THAT(
InvokeOperator<DenseArray<int>>(
"core.presence_and", CreateDenseArray<int>({1, 2, std::nullopt}),
CreateDenseArray<Unit>({kUnit, std::nullopt, kUnit})),
IsOkAndHolds(ElementsAre(1, std::nullopt, std::nullopt)));
EXPECT_THAT(
InvokeOperator<DenseArray<int>>(
"core.presence_and", CreateDenseArray<int>({1, 2, std::nullopt}),
CreateDenseArray<Unit>({kUnit, kUnit, kUnit})),
IsOkAndHolds(ElementsAre(1, 2, std::nullopt)));
}
TEST(LogicOpsTest, DenseArrayPresenceOrOp) {
EXPECT_THAT(InvokeOperator<DenseArray<int>>("core.presence_or",
CreateDenseArray<int>({1, 2, 3}),
CreateDenseArray<int>({4, 5, 6})),
IsOkAndHolds(ElementsAre(1, 2, 3)));
EXPECT_THAT(
InvokeOperator<DenseArray<int>>(
"core.presence_or", CreateDenseArray<int>({1, 2, std::nullopt}),
CreateDenseArray<int>({4, 5, 6})),
IsOkAndHolds(ElementsAre(1, 2, 6)));
EXPECT_THAT(
InvokeOperator<DenseArray<int>>(
"core.presence_or", CreateDenseArray<int>({1, 2, std::nullopt}),
CreateDenseArray<int>({4, 5, std::nullopt})),
IsOkAndHolds(ElementsAre(1, 2, std::nullopt)));
EXPECT_THAT(
InvokeOperator<DenseArray<int>>(
"core.presence_or",
CreateDenseArray<int>({std::nullopt, std::nullopt, std::nullopt}),
CreateDenseArray<int>({4, 5, std::nullopt})),
IsOkAndHolds(ElementsAre(4, 5, std::nullopt)));
}
TEST(LogicOpsTest, DenseArrayPresenceNotOp) {
{
auto full_int = CreateConstDenseArray<int>(35, 7);
auto empty_unit = CreateEmptyDenseArray<Unit>(35);
EXPECT_THAT(InvokeOperator<DenseArray<Unit>>("core.presence_not._builtin",
full_int),
IsOkAndHolds(ElementsAreArray(empty_unit)));
}
{
auto empty_int = CreateEmptyDenseArray<int>(35);
auto full_unit = CreateConstDenseArray<Unit>(35, kUnit);
EXPECT_THAT(InvokeOperator<DenseArray<Unit>>("core.presence_not._builtin",
empty_int),
IsOkAndHolds(ElementsAreArray(full_unit)));
}
{
std::vector<std::optional<int>> input(35, std::nullopt);
input[15] = 5;
input[24] = 7;
std::vector<OptionalValue<Unit>> expected(input.size());
for (int i = 0; i < input.size(); ++i) {
expected[i].present = !input[i].has_value();
}
ASSERT_OK_AND_ASSIGN(
auto res, InvokeOperator<DenseArray<Unit>>(
"core.presence_not._builtin",
CreateDenseArray<int>(input.begin(), input.end())));
EXPECT_EQ(std::vector(res.begin(), res.end()), expected);
}
}
TEST(LogicOpsTest, DenseArrayPresenceOrWithOptionalOp) {
EXPECT_THAT(InvokeOperator<DenseArray<int>>("core.presence_or",
CreateDenseArray<int>({1, 2, 3}),
OptionalValue<int>(4)),
IsOkAndHolds(ElementsAre(1, 2, 3)));
EXPECT_THAT(
InvokeOperator<DenseArray<int>>(
"core.presence_or", CreateDenseArray<int>({1, std::nullopt, 3}),
OptionalValue<int>(4)),
IsOkAndHolds(ElementsAre(1, 4, 3)));
EXPECT_THAT(InvokeOperator<DenseArray<int>>(
"core.presence_or",
CreateDenseArray<int>({std::nullopt, std::nullopt}),
OptionalValue<int>(4)),
IsOkAndHolds(ElementsAre(4, 4)));
EXPECT_THAT(InvokeOperator<DenseArray<int>>("core.presence_or",
CreateDenseArray<int>({3, 2}),
OptionalValue<int>()),
IsOkAndHolds(ElementsAre(3, 2)));
EXPECT_THAT(InvokeOperator<DenseArray<int>>(
"core.presence_or", CreateDenseArray<int>({3, std::nullopt}),
OptionalValue<int>()),
IsOkAndHolds(ElementsAre(3, std::nullopt)));
EXPECT_THAT(InvokeOperator<DenseArray<int>>(
"core.presence_or",
CreateDenseArray<int>({std::nullopt, std::nullopt}),
OptionalValue<int>()),
IsOkAndHolds(ElementsAre(std::nullopt, std::nullopt)));
}
TEST(LogicOpsTest, HasOp) {
auto array = CreateDenseArray<float>({1.0, {}, 2.0, {}, 3.0});
ASSERT_OK_AND_ASSIGN(
auto mask, InvokeOperator<DenseArray<Unit>>("core.has._array", array));
EXPECT_THAT(mask,
ElementsAre(kUnit, std::nullopt, kUnit, std::nullopt, kUnit));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/dense_array/logic_ops.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/dense_array/logic_ops_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
8776898b-4b66-476c-826a-904aac0af537 | cpp | google/quiche | quiche_buffer_allocator | quiche/common/quiche_buffer_allocator.cc | quiche/common/quiche_buffer_allocator_test.cc | #include "quiche/common/quiche_buffer_allocator.h"
#include <algorithm>
#include <cstring>
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_prefetch.h"
namespace quiche {
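// Copies `buffer_length` bytes starting at logical offset `iov_offset` within
// the iovec array into a newly allocated QuicheBuffer, prefetching the next
// iovec's base when the copy is about to cross an iovec boundary.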
QuicheBuffer QuicheBuffer::CopyFromIovec(QuicheBufferAllocator* allocator,
const struct iovec* iov, int iov_count,
size_t iov_offset,
size_t buffer_length) {
if (buffer_length == 0) {
return {};
}
int iovnum = 0;
while (iovnum < iov_count && iov_offset >= iov[iovnum].iov_len) {
iov_offset -= iov[iovnum].iov_len;
++iovnum;
}
QUICHE_DCHECK_LE(iovnum, iov_count);
if (iovnum >= iov_count) {
QUICHE_BUG(quiche_bug_10839_1)
<< "iov_offset larger than iovec total size.";
return {};
}
QUICHE_DCHECK_LE(iov_offset, iov[iovnum].iov_len);
const size_t iov_available = iov[iovnum].iov_len - iov_offset;
size_t copy_len = std::min(buffer_length, iov_available);
if (copy_len == iov_available && iovnum + 1 < iov_count) {
char* next_base = static_cast<char*>(iov[iovnum + 1].iov_base);
quiche::QuichePrefetchT0(next_base);
if (iov[iovnum + 1].iov_len >= 64) {
quiche::QuichePrefetchT0(next_base + ABSL_CACHELINE_SIZE);
}
}
QuicheBuffer buffer(allocator, buffer_length);
const char* src = static_cast<char*>(iov[iovnum].iov_base) + iov_offset;
char* dst = buffer.data();
while (true) {
memcpy(dst, src, copy_len);
buffer_length -= copy_len;
dst += copy_len;
if (buffer_length == 0 || ++iovnum >= iov_count) {
break;
}
src = static_cast<char*>(iov[iovnum].iov_base);
copy_len = std::min(buffer_length, iov[iovnum].iov_len);
}
QUICHE_BUG_IF(quiche_bug_10839_2, buffer_length > 0)
<< "iov_offset + buffer_length larger than iovec total size.";
return buffer;
}
} | #include "quiche/common/quiche_buffer_allocator.h"
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_expect_bug.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/simple_buffer_allocator.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace quiche {
namespace test {
namespace {
TEST(QuicheBuffer, CopyFromEmpty) {
SimpleBufferAllocator allocator;
QuicheBuffer buffer = QuicheBuffer::Copy(&allocator, "");
EXPECT_TRUE(buffer.empty());
}
TEST(QuicheBuffer, Copy) {
SimpleBufferAllocator allocator;
QuicheBuffer buffer = QuicheBuffer::Copy(&allocator, "foobar");
EXPECT_EQ("foobar", buffer.AsStringView());
}
TEST(QuicheBuffer, CopyFromIovecZeroBytes) {
const int buffer_length = 0;
SimpleBufferAllocator allocator;
  QuicheBuffer buffer = QuicheBuffer::CopyFromIovec(
      &allocator, /*iov=*/nullptr, /*iov_count=*/0, /*iov_offset=*/0,
      buffer_length);
EXPECT_TRUE(buffer.empty());
constexpr absl::string_view kData("foobar");
iovec iov = MakeIOVector(kData);
  buffer = QuicheBuffer::CopyFromIovec(&allocator, &iov, /*iov_count=*/1,
                                       /*iov_offset=*/0, buffer_length);
  EXPECT_TRUE(buffer.empty());
  buffer = QuicheBuffer::CopyFromIovec(&allocator, &iov, /*iov_count=*/1,
                                       /*iov_offset=*/3, buffer_length);
  EXPECT_TRUE(buffer.empty());
}
TEST(QuicheBuffer, CopyFromIovecSimple) {
constexpr absl::string_view kData("foobar");
iovec iov = MakeIOVector(kData);
SimpleBufferAllocator allocator;
  QuicheBuffer buffer =
      QuicheBuffer::CopyFromIovec(&allocator, &iov, /*iov_count=*/1,
                                  /*iov_offset=*/0, /*buffer_length=*/6);
  EXPECT_EQ("foobar", buffer.AsStringView());
  buffer = QuicheBuffer::CopyFromIovec(&allocator, &iov, /*iov_count=*/1,
                                       /*iov_offset=*/0, /*buffer_length=*/3);
  EXPECT_EQ("foo", buffer.AsStringView());
  buffer = QuicheBuffer::CopyFromIovec(&allocator, &iov, /*iov_count=*/1,
                                       /*iov_offset=*/3, /*buffer_length=*/3);
  EXPECT_EQ("bar", buffer.AsStringView());
  buffer = QuicheBuffer::CopyFromIovec(&allocator, &iov, /*iov_count=*/1,
                                       /*iov_offset=*/1, /*buffer_length=*/4);
  EXPECT_EQ("ooba", buffer.AsStringView());
}
TEST(QuicheBuffer, CopyFromIovecMultiple) {
constexpr absl::string_view kData1("foo");
constexpr absl::string_view kData2("bar");
iovec iov[] = {MakeIOVector(kData1), MakeIOVector(kData2)};
SimpleBufferAllocator allocator;
  QuicheBuffer buffer =
      QuicheBuffer::CopyFromIovec(&allocator, &iov[0], /*iov_count=*/2,
                                  /*iov_offset=*/0, /*buffer_length=*/6);
  EXPECT_EQ("foobar", buffer.AsStringView());
  buffer = QuicheBuffer::CopyFromIovec(&allocator, &iov[0], /*iov_count=*/2,
                                       /*iov_offset=*/0, /*buffer_length=*/3);
  EXPECT_EQ("foo", buffer.AsStringView());
  buffer = QuicheBuffer::CopyFromIovec(&allocator, &iov[0], /*iov_count=*/2,
                                       /*iov_offset=*/3, /*buffer_length=*/3);
  EXPECT_EQ("bar", buffer.AsStringView());
  buffer = QuicheBuffer::CopyFromIovec(&allocator, &iov[0], /*iov_count=*/2,
                                       /*iov_offset=*/1, /*buffer_length=*/4);
  EXPECT_EQ("ooba", buffer.AsStringView());
}
TEST(QuicheBuffer, CopyFromIovecOffsetTooLarge) {
constexpr absl::string_view kData1("foo");
constexpr absl::string_view kData2("bar");
iovec iov[] = {MakeIOVector(kData1), MakeIOVector(kData2)};
SimpleBufferAllocator allocator;
  EXPECT_QUICHE_BUG(
      QuicheBuffer::CopyFromIovec(&allocator, &iov[0], /*iov_count=*/2,
                                  /*iov_offset=*/10, /*buffer_length=*/6),
      "iov_offset larger than iovec total size");
}
TEST(QuicheBuffer, CopyFromIovecTooManyBytesRequested) {
constexpr absl::string_view kData1("foo");
constexpr absl::string_view kData2("bar");
iovec iov[] = {MakeIOVector(kData1), MakeIOVector(kData2)};
SimpleBufferAllocator allocator;
  EXPECT_QUICHE_BUG(
      QuicheBuffer::CopyFromIovec(&allocator, &iov[0], /*iov_count=*/2,
                                  /*iov_offset=*/2, /*buffer_length=*/10),
      R"(iov_offset \+ buffer_length larger than iovec total size)");
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_buffer_allocator.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_buffer_allocator_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
34bdedd9-12aa-43c1-9d00-2b94e3716ffb | cpp | google/arolla | const_with_shape | arolla/expr/optimization/peephole_optimizations/const_with_shape.cc | arolla/expr/optimization/peephole_optimizations/const_with_shape_test.cc | #include "arolla/expr/optimization/peephole_optimizations/const_with_shape.h"
#include <initializer_list>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/optimization/peephole_optimizer.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
namespace {
struct OpRecord {
const char* const from_op;
const char* const to_op;
};
constexpr std::initializer_list<OpRecord> kUnaryPointwiseOps = {
{"bool.logical_not", "bool.logical_not"},
{"core.has._array", "core.has"},
{"core.has._optional", "core.has"},
{"core.presence_not._builtin", "core.presence_not"},
{"core.to_bool", "core.to_bool"},
{"core.to_float32", "core.to_float32"},
{"core.to_float64", "core.to_float64"},
{"core.to_int32", "core.to_int32"},
{"core.to_int64", "core.to_int64"},
{"core.to_optional._scalar", "core.to_optional"},
{"core.to_uint64", "core.to_uint64"},
{"math.abs", "math.abs"},
{"math.ceil", "math.ceil"},
{"math.exp", "math.exp"},
{"math.expm1", "math.expm1"},
{"math.floor", "math.floor"},
{"math.is_finite", "math.is_finite"},
{"math.is_inf", "math.is_inf"},
{"math.is_nan", "math.is_nan"},
{"math.log", "math.log"},
{"math.log10", "math.log10"},
{"math.log1p", "math.log1p"},
{"math.log2", "math.log2"},
{"math.logit", "math.logit"},
{"math.neg", "math.neg"},
{"math.pos", "math.pos"},
{"math.round", "math.round"},
{"math.sigmoid", "math.sigmoid"},
{"math.sign", "math.sign"},
};
constexpr std::initializer_list<OpRecord> kBinaryPointwiseOps = {
{"bool.equal", "bool.equal"},
{"bool.less", "bool.less"},
{"bool.less_equal", "bool.less_equal"},
{"bool.logical_and", "bool.logical_and"},
{"bool.logical_or", "bool.logical_or"},
{"bool.not_equal", "bool.not_equal"},
{"core.equal", "core.equal"},
{"core.less", "core.less"},
{"core.less_equal", "core.less_equal"},
{"core.not_equal", "core.not_equal"},
{"core.presence_and", "core.presence_and"},
{"core.presence_or", "core.presence_or"},
{"math.add", "math.add"},
{"math.divide", "math.divide"},
{"math.floordiv", "math.floordiv"},
{"math.fmod", "math.fmod"},
{"math.max", "math.max"},
{"math.min", "math.min"},
{"math.mod", "math.mod"},
{"math.multiply", "math.multiply"},
{"math.pow", "math.pow"},
{"math.subtract", "math.subtract"},
};
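// Rewrites op(const_with_shape(shape, v)) -> const_with_shape(shape, op(v))
// for every unary pointwise operator listed above, so the pointwise work is
// done once on the scalar instead of per array element.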
absl::Status AddUnaryPointwiseOpOptimizations(
PeepholeOptimizationPack& optimizations) {
ExprNodePtr value = Placeholder("value");
ExprNodePtr shape = Placeholder("shape");
for (const auto& [from_op, to_op] : kUnaryPointwiseOps) {
ASSIGN_OR_RETURN(
ExprNodePtr from,
CallOpReference(from_op,
{CallOpReference("core.const_with_shape._array_shape",
{shape, value})}));
ASSIGN_OR_RETURN(ExprNodePtr to,
CallOpReference("core.const_with_shape",
{shape, CallOpReference(to_op, {value})}));
ASSIGN_OR_RETURN(optimizations.emplace_back(),
PeepholeOptimization::CreatePatternOptimization(from, to));
}
return absl::OkStatus();
}
bool IsBaseQType(const ExprNodePtr& node) {
return IsScalarQType(DecayOptionalQType(node->qtype()));
}
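// The binary analogue of the rewrite above. Besides the fully expanded case,
// mixed patterns where only one operand is expanded are matched too, guarded
// by IsBaseQType on the non-expanded side.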
absl::Status AddBinaryPointwiseOpOptimizations(
PeepholeOptimizationPack& optimizations) {
ExprNodePtr a = Placeholder("a");
ExprNodePtr b = Placeholder("b");
ExprNodePtr shape = Placeholder("shape");
for (const auto& [from_op, to_op] : kBinaryPointwiseOps) {
ASSIGN_OR_RETURN(ExprNodePtr to,
CallOpReference("core.const_with_shape",
{shape, CallOpReference(to_op, {a, b})}));
ASSIGN_OR_RETURN(
ExprNodePtr expanded_a,
CallOpReference("core.const_with_shape._array_shape", {shape, a}));
ASSIGN_OR_RETURN(
ExprNodePtr expanded_b,
CallOpReference("core.const_with_shape._array_shape", {shape, b}));
{
ASSIGN_OR_RETURN(ExprNodePtr from,
CallOpReference(from_op, {expanded_a, expanded_b}));
ASSIGN_OR_RETURN(
optimizations.emplace_back(),
PeepholeOptimization::CreatePatternOptimization(from, to));
}
{
ASSIGN_OR_RETURN(ExprNodePtr from,
CallOpReference(from_op, {expanded_a, b}));
ASSIGN_OR_RETURN(optimizations.emplace_back(),
PeepholeOptimization::CreatePatternOptimization(
from, to, {{"b", IsBaseQType}}));
}
{
ASSIGN_OR_RETURN(ExprNodePtr from,
CallOpReference(from_op, {a, expanded_b}));
ASSIGN_OR_RETURN(optimizations.emplace_back(),
PeepholeOptimization::CreatePatternOptimization(
from, to, {{"a", IsBaseQType}}));
}
}
return absl::OkStatus();
}
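// Collapses core._array_shape_of(const_with_shape(shape, ...)) -- optionally
// through core.has -- directly to `shape`.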
absl::Status AddArrayShapeOfOptimizations(
PeepholeOptimizationPack& optimizations) {
ExprNodePtr a = Placeholder("a");
ExprNodePtr shape = Placeholder("shape");
{
ASSIGN_OR_RETURN(
ExprNodePtr from,
CallOpReference(
"core._array_shape_of",
{CallOpReference(
"core.has._array",
{CallOpReference("core.const_with_shape._array_shape",
{shape, a})})}));
ASSIGN_OR_RETURN(
optimizations.emplace_back(),
PeepholeOptimization::CreatePatternOptimization(from, shape));
}
{
ASSIGN_OR_RETURN(
ExprNodePtr from,
CallOpReference("core._array_shape_of",
{CallOpReference("core.const_with_shape._array_shape",
{shape, a})}));
ASSIGN_OR_RETURN(
optimizations.emplace_back(),
PeepholeOptimization::CreatePatternOptimization(from, shape));
}
return absl::OkStatus();
}
}
absl::StatusOr<PeepholeOptimizationPack> ConstWithShapeOptimizations() {
PeepholeOptimizationPack optimizations;
RETURN_IF_ERROR(AddArrayShapeOfOptimizations(optimizations));
RETURN_IF_ERROR(AddUnaryPointwiseOpOptimizations(optimizations));
RETURN_IF_ERROR(AddBinaryPointwiseOpOptimizations(optimizations));
return optimizations;
}
} | #include "arolla/expr/optimization/peephole_optimizations/const_with_shape.h"
#include <memory>
#include <optional>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/statusor.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/optimization/peephole_optimizer.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/unit.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
namespace {
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::WithQTypeAnnotation;
class ConstWithShapeOptimizationsTest : public ::testing::Test {
protected:
void SetUp() override {
ASSERT_OK_AND_ASSIGN(
optimizer_, CreatePeepholeOptimizer({ConstWithShapeOptimizations}));
GetDenseArrayQType<float>();
GetDenseArrayQType<Unit>();
}
absl::StatusOr<ExprNodePtr> ApplyOptimizer(
absl::StatusOr<ExprNodePtr> status_or_expr) const {
ASSIGN_OR_RETURN(auto expr, ToLowest(status_or_expr));
return ToLowest(optimizer_->ApplyToNode(expr));
}
absl::StatusOr<ExprNodePtr> ToLowest(
const absl::StatusOr<ExprNodePtr>& status_or_expr) const {
if (!status_or_expr.ok()) {
return std::move(status_or_expr).status();
}
return ::arolla::expr::ToLowest(*status_or_expr);
}
std::unique_ptr<PeepholeOptimizer> optimizer_;
};
TEST_F(ConstWithShapeOptimizationsTest, UnaryPointwiseOpOptimizations) {
ASSERT_OK_AND_ASSIGN(
auto shape,
WithQTypeAnnotation(Leaf("shape"), GetQType<DenseArrayShape>()));
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto y,
WithQTypeAnnotation(Leaf("y"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y}));
{
ASSERT_OK_AND_ASSIGN(
auto actual_expr,
ApplyOptimizer(CallOp(
"math.exp", {CallOp("core.const_with_shape", {shape, x_plus_y})})));
ASSERT_OK_AND_ASSIGN(
auto expected_expr,
ToLowest(CallOp("core.const_with_shape",
{shape, CallOp("math.exp", {x_plus_y})})));
EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
}
{
ASSERT_OK_AND_ASSIGN(
auto actual_expr,
ApplyOptimizer(CallOp(
"core.has", {CallOp("core.const_with_shape", {shape, x_plus_y})})));
ASSERT_OK_AND_ASSIGN(
auto expected_expr,
ToLowest(CallOp("core.const_with_shape", {shape, Literal(Unit{})})));
EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
}
}
TEST_F(ConstWithShapeOptimizationsTest, BinaryPointwiseOpOptimizations) {
ASSERT_OK_AND_ASSIGN(
auto shape,
WithQTypeAnnotation(Leaf("shape"), GetQType<DenseArrayShape>()));
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto y,
WithQTypeAnnotation(Leaf("y"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y}));
ASSERT_OK_AND_ASSIGN(auto x_minus_y, CallOp("math.subtract", {x, y}));
ASSERT_OK_AND_ASSIGN(
auto actual_expr,
ApplyOptimizer(
CallOp("core.equal",
{CallOp("core.const_with_shape", {shape, x_plus_y}),
CallOp("core.const_with_shape", {shape, x_minus_y})})));
ASSERT_OK_AND_ASSIGN(
auto expected_expr,
ToLowest(CallOp("core.const_with_shape",
{shape, CallOp("core.equal", {x_plus_y, x_minus_y})})));
EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
}
TEST_F(ConstWithShapeOptimizationsTest, BinaryOpWithConstantOptimizations) {
ASSERT_OK_AND_ASSIGN(
auto shape,
WithQTypeAnnotation(Leaf("shape"), GetQType<DenseArrayShape>()));
ASSERT_OK_AND_ASSIGN(
auto x, WithQTypeAnnotation(Leaf("x"), GetOptionalQType<float>()));
ASSERT_OK_AND_ASSIGN(
auto y, WithQTypeAnnotation(Leaf("y"), GetOptionalQType<float>()));
ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y}));
ASSERT_OK_AND_ASSIGN(auto x_minus_y, CallOp("math.subtract", {x, y}));
ASSERT_OK_AND_ASSIGN(
auto expected_expr,
ToLowest(
CallOp("core.const_with_shape",
{shape, CallOp("core.presence_or", {x_plus_y, x_minus_y})})));
{
SCOPED_TRACE("left expanded, right is not expanded");
ASSERT_OK_AND_ASSIGN(
auto actual_expr,
ApplyOptimizer(CallOp(
"core.presence_or",
{CallOp("core.const_with_shape", {shape, x_plus_y}), x_minus_y})));
EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
}
{
SCOPED_TRACE("left is not expanded, right is expanded");
ASSERT_OK_AND_ASSIGN(
auto actual_expr,
ApplyOptimizer(CallOp(
"core.presence_or",
{x_plus_y, CallOp("core.const_with_shape", {shape, x_minus_y})})));
EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
}
}
TEST_F(ConstWithShapeOptimizationsTest, ArrayShapeOptimizations) {
ASSERT_OK_AND_ASSIGN(
auto shape,
WithQTypeAnnotation(Leaf("shape"), GetQType<DenseArrayShape>()));
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto y,
WithQTypeAnnotation(Leaf("y"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {x, y}));
ASSERT_OK_AND_ASSIGN(
auto actual_expr,
ApplyOptimizer(CallOp(
"core.shape_of", {CallOp("core.has", {CallOp("core.const_with_shape",
{shape, x_plus_y})})})));
EXPECT_THAT(actual_expr, EqualsExpr(shape));
}
TEST_F(ConstWithShapeOptimizationsTest, ArrayShapeOptimizationsForPresence) {
ASSERT_OK_AND_ASSIGN(
auto shape,
WithQTypeAnnotation(Leaf("shape"), GetQType<DenseArrayShape>()));
ASSERT_OK_AND_ASSIGN(
auto actual_expr,
ApplyOptimizer(
CallOp("core.shape_of",
{CallOp("core.const_with_shape",
{shape, Literal<OptionalUnit>(std::nullopt)})})));
EXPECT_THAT(actual_expr, EqualsExpr(shape));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/optimization/peephole_optimizations/const_with_shape.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/optimization/peephole_optimizations/const_with_shape_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
4a1c6abc-0c40-4ed1-b36e-8bfe93401374 | cpp | tensorflow/tensorflow | common_shape_fns | tensorflow/core/framework/common_shape_fns.cc | tensorflow/core/framework/common_shape_fns_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/util/einsum_op_util.h"
#include "tensorflow/core/util/tensor_format.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace shape_inference {
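// Symbolic shape-inference version of the windowed output size computation:
// derives an output spatial dimension from the input dimension, filter size,
// dilation, stride, and padding using DimensionHandle arithmetic, so unknown
// dimensions propagate instead of failing.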
Status GetWindowedOutputSizeFromDimsV2(
shape_inference::InferenceContext* c,
shape_inference::DimensionHandle input_size,
shape_inference::DimensionOrConstant filter_size, int64_t dilation_rate,
int64_t stride, Padding padding_type, int64_t padding_before,
int64_t padding_after, shape_inference::DimensionHandle* output_size) {
if (stride <= 0) {
return errors::InvalidArgument("Stride must be > 0, but got ", stride);
}
if (dilation_rate < 1) {
return errors::InvalidArgument("Dilation rate must be >= 1, but got ",
dilation_rate);
}
switch (padding_type) {
case Padding::VALID:
padding_before = padding_after = 0;
TF_FALLTHROUGH_INTENDED;
case Padding::EXPLICIT:
TF_RETURN_IF_ERROR(
c->Add(input_size, padding_before + padding_after, &input_size));
if (dilation_rate > 1) {
DimensionHandle window_size;
TF_RETURN_IF_ERROR(
c->Subtract(c->MakeDim(filter_size), 1, &window_size));
TF_RETURN_IF_ERROR(
c->Multiply(window_size, dilation_rate, &window_size));
TF_RETURN_IF_ERROR(c->Add(window_size, 1, &window_size));
TF_RETURN_IF_ERROR(c->Subtract(input_size, window_size, output_size));
} else {
TF_RETURN_IF_ERROR(c->Subtract(input_size, filter_size, output_size));
}
TF_RETURN_IF_ERROR(c->Add(*output_size, stride, output_size));
      TF_RETURN_IF_ERROR(c->Divide(*output_size, stride,
                                   /*evenly_divisible=*/false, output_size));
break;
case Padding::SAME:
TF_RETURN_IF_ERROR(c->Add(input_size, stride - 1, output_size));
      TF_RETURN_IF_ERROR(c->Divide(*output_size, stride,
                                   /*evenly_divisible=*/false, output_size));
break;
}
return absl::OkStatus();
}
Status GetWindowedOutputSizeFromDims(
shape_inference::InferenceContext* c,
shape_inference::DimensionHandle input_size,
shape_inference::DimensionOrConstant filter_size, int64_t stride,
Padding padding_type, shape_inference::DimensionHandle* output_size) {
if (padding_type == Padding::EXPLICIT) {
return errors::Internal(
"GetWindowedOutputSizeFromDims does not handle EXPLICIT padding; call "
"GetWindowedOutputSizeFromDimsV2 instead");
}
  return GetWindowedOutputSizeFromDimsV2(c, input_size, filter_size,
                                         /*dilation_rate=*/1, stride,
                                         padding_type, /*padding_before=*/-1,
                                         /*padding_after=*/-1, output_size);
}
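// Propagates input 0 to output 0 unchanged, including any resource-handle
// shape/type metadata attached to the input.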
Status UnchangedShape(shape_inference::InferenceContext* c) {
c->set_output(0, c->input(0));
auto* handle_data = c->input_handle_shapes_and_types(0);
if (handle_data != nullptr) {
c->set_output_handle_shapes_and_types(0, *handle_data);
}
return absl::OkStatus();
}
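// [m, k] x [k, n] -> [m, n]; the transpose_a/transpose_b attrs select which
// axes act as the inner dimensions, which are then merged for validation.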
Status MatMulShape(shape_inference::InferenceContext* c) {
ShapeHandle a;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &a));
ShapeHandle b;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &b));
bool transpose_a, transpose_b;
TF_RETURN_IF_ERROR(c->GetAttr("transpose_a", &transpose_a));
TF_RETURN_IF_ERROR(c->GetAttr("transpose_b", &transpose_b));
DimensionHandle output_rows = transpose_a ? c->Dim(a, 1) : c->Dim(a, 0);
DimensionHandle output_cols = transpose_b ? c->Dim(b, 0) : c->Dim(b, 1);
DimensionHandle inner_a = transpose_a ? c->Dim(a, 0) : c->Dim(a, 1);
DimensionHandle inner_b = transpose_b ? c->Dim(b, 1) : c->Dim(b, 0);
DimensionHandle merged;
TF_RETURN_IF_ERROR(c->Merge(inner_a, inner_b, &merged));
c->set_output(0, c->Matrix(output_rows, output_cols));
return absl::OkStatus();
}
namespace {
Status ValidateEinsumEllipsis(absl::string_view subscript,
bool* found_ellipsis) {
const int num_periods = absl::c_count(subscript, '.');
if (num_periods != 0 && num_periods != 3) {
return errors::InvalidArgument(
"Expected at most one ellipsis (...), but found ", num_periods,
" periods (.) in the input subscript: ", subscript);
}
if (num_periods == 3 && !absl::StrContains(subscript, "...")) {
return errors::InvalidArgument(
"Periods found outside of ellipsis in subscript: ", subscript);
}
*found_ellipsis = num_periods > 0;
return absl::OkStatus();
}
}
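// Shape inference for Einsum: validates the equation, merges the dimension
// bound to each label across all inputs, broadcasts the ellipsis ("...")
// parts, and assembles the output shape from the output subscripts.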
Status EinsumShape(shape_inference::InferenceContext* c) {
string equation;
TF_RETURN_IF_ERROR(c->GetAttr("equation", &equation));
absl::InlinedVector<string, 2> input_labels;
string output_labels;
TF_RETURN_IF_ERROR(
ValidateEinsumEquation(equation, &input_labels, &output_labels));
if (c->num_inputs() == 0 || c->num_inputs() > 2) {
return errors::InvalidArgument("Expected either 1 or 2 inputs but got: ",
c->num_inputs());
}
const int input_labels_size = input_labels.size();
if (c->num_inputs() != input_labels_size) {
return errors::InvalidArgument("Expected ", input_labels.size(),
" inputs for equation ", equation,
" but got: ", c->num_inputs());
}
absl::flat_hash_map<char, DimensionHandle> label_to_dimension;
absl::InlinedVector<ShapeHandle, 2> input_bcast_shapes(c->num_inputs());
for (int i = 0, end = c->num_inputs(); i < end; ++i) {
bool has_ellipsis = false;
TF_RETURN_IF_ERROR(ValidateEinsumEllipsis(input_labels[i], &has_ellipsis));
ShapeHandle input_shape = c->input(i);
if (c->RankKnown(input_shape)) {
if (has_ellipsis) {
const int num_named_labels =
static_cast<int>(input_labels[i].size()) - 3;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
c->WithRankAtLeast(input_shape, num_named_labels, &input_shape),
" for ", i, "th input and equation: ", equation);
} else {
const int num_named_labels = static_cast<int>(input_labels[i].size());
TF_RETURN_WITH_CONTEXT_IF_ERROR(
c->WithRank(input_shape, num_named_labels, &input_shape), " for ",
i, "th input and equation: ", equation);
}
}
bool seen_ellipsis = false;
input_bcast_shapes[i] = c->Scalar();
for (int label_idx = 0, end = input_labels[i].size(); label_idx < end;
++label_idx) {
const char label = input_labels[i][label_idx];
const int64_t axis_before_ellipsis = label_idx;
const int64_t axis_after_ellipsis =
c->RankKnown(input_shape)
? label_idx + c->Rank(input_shape) - input_labels[i].size()
: -1;
if (label == '.') {
if (!c->RankKnown(input_shape)) {
input_bcast_shapes[i] = c->UnknownShape();
} else {
TF_RETURN_IF_ERROR(c->Subshape(input_shape, axis_before_ellipsis,
axis_after_ellipsis + 3,
&input_bcast_shapes[i]));
}
label_idx += 2;
seen_ellipsis = true;
continue;
}
int64_t axis = seen_ellipsis ? axis_after_ellipsis : axis_before_ellipsis;
DimensionHandle new_dim = c->RankKnown(input_shape)
? c->Dim(input_shape, axis)
: c->UnknownDim();
if (label_to_dimension.contains(label)) {
DimensionHandle merged;
TF_RETURN_IF_ERROR(
c->Merge(label_to_dimension[label], new_dim, &merged));
label_to_dimension[label] = merged;
} else {
label_to_dimension[label] = new_dim;
}
}
}
ShapeHandle output_bcast_shape;
if (input_bcast_shapes.size() == 1) {
output_bcast_shape = input_bcast_shapes[0];
} else if (input_bcast_shapes.size() == 2) {
    TF_RETURN_IF_ERROR(BroadcastBinaryOpOutputShapeFnHelper(
        c, input_bcast_shapes[0], input_bcast_shapes[1],
        /*incompatible_shape_error=*/true, &output_bcast_shape));
}
bool output_has_ellipsis = false;
TF_RETURN_IF_ERROR(
ValidateEinsumEllipsis(output_labels, &output_has_ellipsis));
if (output_has_ellipsis) {
if (!c->RankKnown(output_bcast_shape)) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
} else {
TF_RETURN_WITH_CONTEXT_IF_ERROR(
c->WithRankAtMost(output_bcast_shape, 0, &output_bcast_shape),
" for einsum equation '", equation,
"' without ellipsis (...) in the output subscripts where input(s) have "
"non-empty broadcasting shape");
output_bcast_shape = c->Scalar();
}
std::vector<DimensionHandle> output_dims;
for (int label_idx = 0, end = output_labels.size(); label_idx < end;
++label_idx) {
const char label = output_labels[label_idx];
if (label == '.') {
for (int k = 0; k < c->Rank(output_bcast_shape); ++k) {
output_dims.push_back(c->Dim(output_bcast_shape, k));
}
label_idx += 2;
continue;
}
auto dimension_it = label_to_dimension.find(label);
if (dimension_it == label_to_dimension.end()) {
return errors::InvalidArgument(
"Einsum output subscripts for equation '", equation, "' has label '",
label, "' which is not present in the input subscripts");
}
output_dims.push_back(dimension_it->second);
}
c->set_output(0, c->MakeShape(output_dims));
return absl::OkStatus();
}
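// Like BatchMatMulShape below, except that the leading batch dimensions are
// combined with broadcasting semantics rather than an exact Merge.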
Status BatchMatMulV2Shape(shape_inference::InferenceContext* c) {
ShapeHandle a_shape;
ShapeHandle b_shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &a_shape));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &b_shape));
bool adj_x;
bool adj_y;
TF_RETURN_IF_ERROR(c->GetAttr("adj_x", &adj_x));
TF_RETURN_IF_ERROR(c->GetAttr("adj_y", &adj_y));
DimensionHandle output_rows = c->Dim(a_shape, adj_x ? -1 : -2);
DimensionHandle output_cols = c->Dim(b_shape, adj_y ? -2 : -1);
DimensionHandle inner_merged;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(a_shape, adj_x ? -2 : -1),
c->Dim(b_shape, adj_y ? -1 : -2), &inner_merged));
ShapeHandle a_batch_shape;
ShapeHandle b_batch_shape;
ShapeHandle output_batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(a_shape, 0, -2, &a_batch_shape));
TF_RETURN_IF_ERROR(c->Subshape(b_shape, 0, -2, &b_batch_shape));
  TF_RETURN_IF_ERROR(BroadcastBinaryOpOutputShapeFnHelper(
      c, a_batch_shape, b_batch_shape, /*incompatible_shape_error=*/true,
      &output_batch_shape));
ShapeHandle output_shape;
TF_RETURN_IF_ERROR(c->Concatenate(
output_batch_shape, c->Matrix(output_rows, output_cols), &output_shape));
c->set_output(0, output_shape);
return absl::OkStatus();
}
Status BatchMatMulShape(shape_inference::InferenceContext* c) {
ShapeHandle a_shape;
ShapeHandle b_shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &a_shape));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &b_shape));
bool adj_x;
bool adj_y;
TF_RETURN_IF_ERROR(c->GetAttr("adj_x", &adj_x));
TF_RETURN_IF_ERROR(c->GetAttr("adj_y", &adj_y));
DimensionHandle output_rows = c->Dim(a_shape, adj_x ? -1 : -2);
DimensionHandle output_cols = c->Dim(b_shape, adj_y ? -2 : -1);
ShapeHandle a_batch_dims;
ShapeHandle b_batch_dims;
ShapeHandle batch_dims;
TF_RETURN_IF_ERROR(c->Subshape(a_shape, 0, -2, &a_batch_dims));
TF_RETURN_IF_ERROR(c->Subshape(b_shape, 0, -2, &b_batch_dims));
TF_RETURN_IF_ERROR(c->Merge(a_batch_dims, b_batch_dims, &batch_dims));
DimensionHandle unused;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(a_shape, adj_x ? -2 : -1),
c->Dim(b_shape, adj_y ? -1 : -2), &unused));
ShapeHandle out;
TF_RETURN_IF_ERROR(
c->Concatenate(batch_dims, c->Matrix(output_rows, output_cols), &out));
c->set_output(0, out);
return absl::OkStatus();
}
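// BiasAdd: the output shape equals the input shape with the channel dimension
// merged against the length of the rank-1 bias (axis 1 for NCHW, the last
// axis otherwise).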
Status BiasAddShape(shape_inference::InferenceContext* c) {
ShapeHandle input_shape;
string data_format;
Status s = c->GetAttr("data_format", &data_format);
if (s.ok() && data_format == "NCHW") {
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 3, &input_shape));
} else {
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input_shape));
}
ShapeHandle bias_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &bias_shape));
DimensionHandle bias_dim = c->Dim(bias_shape, 0);
if (!c->RankKnown(input_shape)) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
ShapeHandle output_shape;
if (s.ok() && data_format == "NCHW") {
ShapeHandle first;
TF_RETURN_IF_ERROR(c->Subshape(input_shape, 0, 1, &first));
ShapeHandle last;
TF_RETURN_IF_ERROR(c->Subshape(input_shape, 2, &last));
DimensionHandle input_bias_dim = c->Dim(input_shape, 1);
DimensionHandle merged_bias_dim;
TF_RETURN_IF_ERROR(c->Merge(input_bias_dim, bias_dim, &merged_bias_dim));
ShapeHandle merged_bias = c->Vector(merged_bias_dim);
ShapeHandle temp;
TF_RETURN_IF_ERROR(c->Concatenate(first, merged_bias, &temp));
TF_RETURN_IF_ERROR(c->Concatenate(temp, last, &output_shape));
} else {
ShapeHandle all_but_bias;
TF_RETURN_IF_ERROR(c->Subshape(input_shape, 0, -1, &all_but_bias));
DimensionHandle input_bias_dim = c->Dim(input_shape, -1);
DimensionHandle merged_bias_dim;
TF_RETURN_IF_ERROR(c->Merge(input_bias_dim, bias_dim, &merged_bias_dim));
ShapeHandle merged_bias = c->Vector(merged_bias_dim);
TF_RETURN_IF_ERROR(
c->Concatenate(all_but_bias, merged_bias, &output_shape));
}
c->set_output(0, output_shape);
return absl::OkStatus();
}
Status BiasAddGradShape(shape_inference::InferenceContext* c) {
ShapeHandle input_shape;
string data_format;
Status s = c->GetAttr("data_format", &data_format);
if (s.ok() && data_format == "NCHW") {
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 3, &input_shape));
c->set_output(0, c->Vector(c->Dim(input_shape, 1)));
} else {
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input_shape));
c->set_output(0, c->Vector(c->Dim(input_shape, -1)));
}
return absl::OkStatus();
}
Status CheckFormatConstraintsOnShape(const TensorFormat tensor_format,
const ShapeHandle shape_handle,
const string& tensor_name,
shape_inference::InferenceContext* c) {
if (tensor_format == FORMAT_NCHW_VECT_C) {
const int num_dims = c->Rank(shape_handle);
DimensionHandle vect_dim = c->Dim(
shape_handle, GetTensorInnerFeatureDimIndex(num_dims, tensor_format));
int64_t vect_dim_val = c->Value(vect_dim);
if (vect_dim_val != 4 && vect_dim_val != 32) {
return errors::InvalidArgument(
"VECT_C dimension must be 4 or 32, but is ", vect_dim_val);
}
}
return absl::OkStatus();
}
Status DatasetIteratorShape(shape_inference::InferenceContext* c) {
shape_inference::ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
std::vector<PartialTensorShape> output_shapes;
TF_RETURN_IF_ERROR(c->GetAttr("output_shapes", &output_shapes));
const int output_shapes_size = output_shapes.size();
  if (output_shapes_size != c->num_outputs()) {
    return errors::InvalidArgument(
        "`output_shapes` must be the same length as `output_types` (",
        output_shapes.size(), " vs. ", c->num_outputs(), ")");
  }
for (size_t i = 0; i < output_shapes.size(); ++i) {
shape_inference::ShapeHandle output_shape_handle;
TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(
output_shapes[i], &output_shape_handle));
c->set_output(static_cast<int>(i), output_shape_handle);
}
return absl::OkStatus();
}
Status MakeShapeFromFormat(TensorFormat format, DimensionOrConstant N,
const std::vector<DimensionOrConstant>& spatial,
DimensionOrConstant C, ShapeHandle* out,
shape_inference::InferenceContext* context) {
const int num_dims = GetTensorDimsFromSpatialDims(spatial.size(), format);
std::vector<DimensionHandle> dims_actual(num_dims);
dims_actual[GetTensorBatchDimIndex(num_dims, format)] = context->MakeDim(N);
int outer_c_index = GetTensorFeatureDimIndex(num_dims, format);
dims_actual[outer_c_index] = context->MakeDim(C);
if (format == FORMAT_NCHW_VECT_C) {
dims_actual[GetTensorInnerFeatureDimIndex(num_dims, format)] =
context->MakeDim(4);
} else if (format == FORMAT_NHWC_VECT_W) {
dims_actual[GetTensorInnerWidthDimIndex(num_dims, format)] =
context->MakeDim(4);
}
for (int spatial_dim = 0, end = spatial.size(); spatial_dim < end;
spatial_dim++) {
dims_actual[GetTensorSpatialDimIndex(num_dims, format, spatial_dim)] =
context->MakeDim(spatial[spatial_dim]);
}
*out = context->MakeShape(dims_actual);
return absl::OkStatus();
}
Status DimensionsFromShape(ShapeHandle shape, TensorFormat format,
DimensionHandle* batch_dim,
absl::Span<DimensionHandle> spatial_dims,
DimensionHandle* filter_dim,
InferenceContext* context) {
const int32_t rank =
GetTensorDimsFromSpatialDims(spatial_dims.size(), format);
*batch_dim = context->Dim(shape, GetTensorBatchDimIndex(rank, format));
for (int spatial_dim_index = 0, end = spatial_dims.size();
spatial_dim_index < end; ++spatial_dim_index) {
spatial_dims[spatial_dim_index] = context->Dim(
shape, GetTensorSpatialDimIndex(rank, format, spatial_dim_index));
}
*filter_dim = context->Dim(shape, GetTensorFeatureDimIndex(rank, format));
if (format == FORMAT_NCHW_VECT_C) {
TF_RETURN_IF_ERROR(context->Multiply(
*filter_dim,
context->Dim(shape, GetTensorInnerFeatureDimIndex(rank, format)),
filter_dim));
}
return absl::OkStatus();
}
Status ShapeFromDimensions(DimensionHandle batch_dim,
absl::Span<const DimensionHandle> spatial_dims,
DimensionHandle filter_dim, TensorFormat format,
absl::optional<DimensionHandle> vect_size,
InferenceContext* context, ShapeHandle* shape) {
const int32_t rank =
GetTensorDimsFromSpatialDims(spatial_dims.size(), format);
std::vector<DimensionHandle> out_dims(rank);
out_dims[tensorflow::GetTensorBatchDimIndex(rank, format)] = batch_dim;
for (int spatial_dim_index = 0, end = spatial_dims.size();
spatial_dim_index < end; ++spatial_dim_index) {
out_dims[tensorflow::GetTensorSpatialDimIndex(
rank, format, spatial_dim_index)] = spatial_dims[spatial_dim_index];
}
if (format == tensorflow::FORMAT_NCHW_VECT_C) {
CHECK(vect_size.has_value());
    TF_RETURN_IF_ERROR(context->Divide(
        filter_dim, *vect_size, /*evenly_divisible=*/true,
        &out_dims[tensorflow::GetTensorFeatureDimIndex(rank, format)]));
out_dims[GetTensorInnerFeatureDimIndex(rank, format)] = *vect_size;
} else {
out_dims[tensorflow::GetTensorFeatureDimIndex(rank, format)] = filter_dim;
}
*shape = context->MakeShape(out_dims);
return absl::OkStatus();
}
namespace {
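// Shared implementation for the Conv2D shape functions. Handles NHWC/NCHW as
// well as the vectorized NCHW_VECT_C / OIHW_VECT_I layouts, validates the
// depth/group relationship between input and filter, and computes the output
// spatial dims from the stride/dilation/padding attrs.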
Status Conv2DShapeImpl(shape_inference::InferenceContext* c,
bool supports_explicit_padding) {
string data_format_str, filter_format_str;
if (!c->GetAttr("data_format", &data_format_str).ok()) {
data_format_str = "NHWC";
}
if (!c->GetAttr("filter_format", &filter_format_str).ok()) {
filter_format_str =
data_format_str == "NCHW_VECT_C" ? "OIHW_VECT_I" : "HWIO";
}
TensorFormat data_format;
if (!FormatFromString(data_format_str, &data_format)) {
return errors::InvalidArgument("Invalid data format string: ",
data_format_str);
}
FilterTensorFormat filter_format;
if (!FilterFormatFromString(filter_format_str, &filter_format)) {
return errors::InvalidArgument("Invalid filter format string: ",
filter_format_str);
}
constexpr int num_spatial_dims = 2;
const int rank = GetTensorDimsFromSpatialDims(num_spatial_dims, data_format);
ShapeHandle conv_input_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), rank, &conv_input_shape));
TF_RETURN_IF_ERROR(CheckFormatConstraintsOnShape(
data_format, conv_input_shape, "conv_input", c));
ShapeHandle filter_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), rank, &filter_shape));
TF_RETURN_IF_ERROR(
CheckFormatConstraintsOnShape(data_format, filter_shape, "filter", c));
std::vector<int32> dilations;
TF_RETURN_IF_ERROR(c->GetAttr("dilations", &dilations));
if (dilations.size() != 4) {
return errors::InvalidArgument(
"Conv2D requires the dilation attribute to contain 4 values, but got: ",
dilations.size());
}
std::vector<int32> strides;
TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides));
if (strides.size() != 4) {
return errors::InvalidArgument("Conv2D on data format ", data_format_str,
" requires the stride attribute to contain"
" 4 values, but got: ",
strides.size());
}
const int32_t stride_rows = GetTensorDim(strides, data_format, 'H');
const int32_t stride_cols = GetTensorDim(strides, data_format, 'W');
const int32_t dilation_rows = GetTensorDim(dilations, data_format, 'H');
const int32_t dilation_cols = GetTensorDim(dilations, data_format, 'W');
DimensionHandle batch_size_dim;
DimensionHandle input_depth_dim;
absl::InlinedVector<DimensionHandle, 2> input_spatial_dims(2);
TF_RETURN_IF_ERROR(DimensionsFromShape(
conv_input_shape, data_format, &batch_size_dim,
absl::MakeSpan(input_spatial_dims), &input_depth_dim, c));
DimensionHandle output_depth_dim = c->Dim(
filter_shape, GetFilterDimIndex<num_spatial_dims>(filter_format, 'O'));
DimensionHandle filter_rows_dim = c->Dim(
filter_shape, GetFilterDimIndex<num_spatial_dims>(filter_format, 'H'));
DimensionHandle filter_cols_dim = c->Dim(
filter_shape, GetFilterDimIndex<num_spatial_dims>(filter_format, 'W'));
DimensionHandle filter_input_depth_dim;
if (filter_format == FORMAT_OIHW_VECT_I) {
TF_RETURN_IF_ERROR(c->Multiply(
c->Dim(filter_shape,
GetFilterDimIndex<num_spatial_dims>(filter_format, 'I')),
c->Dim(filter_shape,
GetFilterTensorInnerInputChannelsDimIndex(rank, filter_format)),
&filter_input_depth_dim));
} else {
filter_input_depth_dim = c->Dim(
filter_shape, GetFilterDimIndex<num_spatial_dims>(filter_format, 'I'));
}
if (c->ValueKnown(input_depth_dim) && c->ValueKnown(filter_input_depth_dim)) {
int64_t input_depth_value = c->Value(input_depth_dim),
filter_input_depth_value = c->Value(filter_input_depth_dim);
if (filter_input_depth_value == 0)
return errors::InvalidArgument("Depth of filter must not be 0");
if (input_depth_value % filter_input_depth_value != 0)
return errors::InvalidArgument(
"Depth of input (", input_depth_value,
") is not a multiple of input depth of filter (",
filter_input_depth_value, ")");
if (input_depth_value != filter_input_depth_value) {
int64_t num_groups = input_depth_value / filter_input_depth_value;
if (c->ValueKnown(output_depth_dim)) {
int64_t output_depth_value = c->Value(output_depth_dim);
if (num_groups == 0)
return errors::InvalidArgument("Number of groups must not be 0");
if (output_depth_value % num_groups != 0)
return errors::InvalidArgument(
"Depth of output (", output_depth_value,
") is not a multiple of the number of groups (", num_groups, ")");
}
}
}
Padding padding;
TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding));
std::vector<int64_t> explicit_paddings;
if (supports_explicit_padding) {
Status s = c->GetAttr("explicit_paddings", &explicit_paddings);
if (!s.ok() && !errors::IsNotFound(s)) {
return s;
}
    TF_RETURN_IF_ERROR(CheckValidPadding(padding, explicit_paddings,
                                         /*num_dims=*/4, data_format));
} else {
if (padding == Padding::EXPLICIT) {
return errors::InvalidArgument(
"Expected non-explicit padding but got explicit padding");
}
std::vector<int64_t> p_list;
Status s_p_list = c->GetAttr("padding_list", &p_list);
if (!s_p_list.ok() && !errors::IsNotFound(s_p_list)) {
return s_p_list;
}
if (s_p_list.ok() && !p_list.empty()) {
padding = Padding::EXPLICIT;
explicit_paddings = p_list;
      TF_RETURN_IF_ERROR(CheckValidPadding(padding, explicit_paddings,
                                           /*num_dims=*/4, data_format));
}
}
DimensionHandle output_rows, output_cols;
int64_t pad_rows_before = -1, pad_rows_after = -1;
int64_t pad_cols_before = -1, pad_cols_after = -1;
if (padding == Padding::EXPLICIT) {
GetExplicitPaddingForDim(explicit_paddings, data_format, 'H',
&pad_rows_before, &pad_rows_after);
GetExplicitPaddingForDim(explicit_paddings, data_format, 'W',
&pad_cols_before, &pad_cols_after);
}
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, input_spatial_dims[0], filter_rows_dim, dilation_rows, stride_rows,
padding, pad_rows_before, pad_rows_after, &output_rows));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, input_spatial_dims[1], filter_cols_dim, dilation_cols, stride_cols,
padding, pad_cols_before, pad_cols_after, &output_cols));
absl::optional<DimensionHandle> vect_size;
if (data_format == FORMAT_NCHW_VECT_C) {
vect_size.emplace(c->Dim(conv_input_shape,
GetTensorInnerFeatureDimIndex(rank, data_format)));
}
ShapeHandle output_shape;
TF_RETURN_IF_ERROR(ShapeFromDimensions(
batch_size_dim, {output_rows, output_cols}, output_depth_dim, data_format,
vect_size, c, &output_shape));
c->set_output(0, output_shape);
return absl::OkStatus();
}
}
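// Shape function for the rank-polymorphic Conv op: 2-D or 3-D convolution,
// optional extra leading batch dimensions (`batch_dims` attr), grouped
// convolution via the `groups` attr, and CHANNELS_FIRST / CHANNELS_LAST
// layouts.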
Status ConvShape(shape_inference::InferenceContext* c) {
ShapeHandle input_shape = c->input(0);
ShapeHandle filter_shape = c->input(1);
int input_rank = c->Rank(input_shape);
int filter_rank = c->Rank(filter_shape);
if (input_rank == InferenceContext::kUnknownRank ||
filter_rank == InferenceContext::kUnknownRank) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
int batch_dims;
TF_RETURN_IF_ERROR(c->GetAttr("batch_dims", &batch_dims));
if (batch_dims < 0) {
return absl::InvalidArgumentError("Batch dims must be non-negative.");
}
int standard_input_rank = input_rank - (batch_dims - 1);
if (standard_input_rank != 4 && standard_input_rank != 5) {
return absl::InvalidArgumentError(
absl::StrCat("Input tensor must be rank 4 or 5, excluding extra "
"batch dimensions, but got: ",
standard_input_rank));
}
  if (filter_rank != 4 && filter_rank != 5) {
    return absl::InvalidArgumentError(absl::StrCat(
        "Filter tensor must be rank 4 or 5, but got: ", filter_rank));
  }
if (filter_rank != standard_input_rank) {
return absl::InvalidArgumentError(
"Input tensor rank must be the same as filter rank.");
}
string data_format_str;
TF_RETURN_IF_ERROR(c->GetAttr("data_format", &data_format_str));
bool channels_last_format;
if (data_format_str == "CHANNELS_LAST") {
channels_last_format = true;
} else if (data_format_str == "CHANNELS_FIRST") {
channels_last_format = false;
} else {
return absl::InvalidArgumentError(
absl::StrCat("Invalid data format: ", data_format_str));
}
TensorFormat data_format = channels_last_format ? FORMAT_NHWC : FORMAT_NCHW;
FilterTensorFormat filter_format = FORMAT_HWIO;
int spatial_dims = standard_input_rank - 2;
std::vector<int32> dilations;
TF_RETURN_IF_ERROR(c->GetAttr("dilations", &dilations));
if (dilations.empty()) {
for (int i = 0; i < standard_input_rank; ++i) dilations.push_back(1);
}
if (dilations.size() != standard_input_rank) {
return absl::InvalidArgumentError(absl::StrCat(
"Conv requires the dilation attribute to contain ", standard_input_rank,
" values, but got: ", dilations.size()));
}
std::vector<int32> strides;
TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides));
if (strides.size() != standard_input_rank) {
return absl::InvalidArgumentError(
absl::StrCat("Stride attribute should contain ", standard_input_rank,
" values, but got: ", strides.size()));
}
auto dim_index = [&](char dimension) {
if (spatial_dims == 2)
return GetTensorDimIndex<2>(data_format, dimension);
else
return GetTensorDimIndex<3>(data_format, dimension);
};
std::vector<int32_t> stride_dims(spatial_dims);
std::vector<int32_t> dilation_dims(spatial_dims);
for (int i = 0; i < spatial_dims; ++i) {
stride_dims[i] = strides[dim_index(static_cast<char>('0' + i))];
dilation_dims[i] = dilations[dim_index(static_cast<char>('0' + i))];
}
std::vector<DimensionHandle> batch_size_dim(batch_dims);
for (int i = 0; i < batch_dims; ++i) {
batch_size_dim[i] = c->Dim(input_shape, i);
}
std::vector<DimensionHandle> in_spatial_dims(spatial_dims);
for (int i = 0; i < spatial_dims; ++i) {
in_spatial_dims[i] = c->Dim(
input_shape, (batch_dims - 1) + dim_index(static_cast<char>('0' + i)));
}
DimensionHandle input_depth_dim =
c->Dim(input_shape, (batch_dims - 1) + dim_index('C'));
auto filter_dim_index = [&](char dimension) {
if (spatial_dims == 2)
return GetFilterDimIndex<2>(filter_format, dimension);
else
return GetFilterDimIndex<3>(filter_format, dimension);
};
std::vector<DimensionHandle> filter_spatial_dims(spatial_dims);
for (int i = 0; i < spatial_dims; ++i) {
filter_spatial_dims[i] =
c->Dim(filter_shape, filter_dim_index(static_cast<char>('0' + i)));
}
DimensionHandle output_depth_dim =
c->Dim(filter_shape, filter_dim_index('O'));
DimensionHandle filter_input_depth_dim;
filter_input_depth_dim = c->Dim(filter_shape, filter_dim_index('I'));
int groups;
TF_RETURN_IF_ERROR(c->GetAttr("groups", &groups));
if (groups < 1) {
return absl::InvalidArgumentError(
"Groups attribute should be a positive integer");
} else if (c->ValueKnown(input_depth_dim) &&
c->Value(input_depth_dim) % groups != 0) {
return absl::InvalidArgumentError(
"Number of groups should divide input depth");
} else if (c->ValueKnown(output_depth_dim) &&
c->Value(output_depth_dim) % groups != 0) {
return absl::InvalidArgumentError(
"Number of groups should divide output depth");
}
if (c->ValueKnown(input_depth_dim) && c->ValueKnown(filter_input_depth_dim)) {
int64_t input_depth_value = c->Value(input_depth_dim),
filter_input_depth_value = c->Value(filter_input_depth_dim);
if (filter_input_depth_value == 0) {
return absl::InvalidArgumentError("Depth of filter must not be 0");
}
if (input_depth_value % filter_input_depth_value != 0) {
return absl::InvalidArgumentError(
absl::StrCat("Depth of input (", input_depth_value,
") is not a multiple of input depth of filter (",
filter_input_depth_value, ")"));
}
if (input_depth_value / filter_input_depth_value != groups) {
return absl::InvalidArgumentError(
absl::StrCat("Input depth divided by filter input depth does not "
"match with groups parameter (",
groups, ")"));
}
}
Padding padding;
TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding));
if (spatial_dims == 3 && padding == Padding::EXPLICIT) {
return absl::InvalidArgumentError(
"Explicit padding not supported for 3D Convolution");
}
std::vector<int64_t> explicit_paddings;
Status s = c->GetAttr("explicit_paddings", &explicit_paddings);
if (!s.ok() && !absl::IsNotFound(s)) {
return s;
}
  TF_RETURN_IF_ERROR(CheckValidPadding(padding, explicit_paddings,
                                       /*num_dims=*/4, data_format));
std::vector<DimensionHandle> output_spatial_dims(spatial_dims);
std::vector<int64_t> pad_before(spatial_dims, -1);
std::vector<int64_t> pad_after(spatial_dims, -1);
if (padding == Padding::EXPLICIT) {
GetExplicitPaddingForDim(explicit_paddings, data_format, 'H',
&pad_before[0], &pad_after[0]);
GetExplicitPaddingForDim(explicit_paddings, data_format, 'W',
&pad_before[1], &pad_after[1]);
}
for (int i = 0; i < spatial_dims; ++i) {
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, in_spatial_dims[i], filter_spatial_dims[i], dilation_dims[i],
stride_dims[i], padding, pad_before[i], pad_after[i],
&output_spatial_dims[i]));
}
ShapeHandle output_shape;
std::vector<DimensionHandle> output_shape_vector(input_rank);
for (int i = 0; i < batch_dims; ++i) {
output_shape_vector[i] = batch_size_dim[i];
}
if (channels_last_format) {
for (int i = 0; i < spatial_dims; ++i) {
output_shape_vector[batch_dims + i] = output_spatial_dims[i];
}
output_shape_vector[batch_dims + spatial_dims] = output_depth_dim;
} else {
output_shape_vector[batch_dims] = output_depth_dim;
for (int i = 0; i < spatial_dims; ++i) {
output_shape_vector[batch_dims + 1 + i] = output_spatial_dims[i];
}
}
output_shape = c->MakeShape(output_shape_vector);
c->set_output(0, output_shape);
return absl::OkStatus();
}
Status Conv2DShapeWithExplicitPadding(shape_inference::InferenceContext* c) {
return Conv2DShapeImpl(c, true);
}
Status Conv2DShape(shape_inference::InferenceContext* c) {
return Conv2DShapeImpl(c, false);
}
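// Shape function for Conv3D. Expects a rank-5 input and a rank-5 filter in
// DHWIO layout; NCDHW inputs are normalized to NDHWC internally, and the
// output shape is emitted in the op's data format.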
Status Conv3DShape(shape_inference::InferenceContext* c) {
ShapeHandle input_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 5, &input_shape));
ShapeHandle filter_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 5, &filter_shape));
string data_format;
Status s = c->GetAttr("data_format", &data_format);
std::vector<int32> dilations;
TF_RETURN_IF_ERROR(c->GetAttr("dilations", &dilations));
if (dilations.size() != 5) {
return errors::InvalidArgument(
"Conv3D requires the dilation attribute to contain 5 values, but got: ",
dilations.size());
}
std::vector<int32> strides;
TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides));
if (strides.size() != 5) {
return errors::InvalidArgument(
"Conv3D requires the stride attribute to contain 5 values, but got: ",
strides.size());
}
int32_t stride_planes, stride_rows, stride_cols;
int32_t dilation_planes, dilation_rows, dilation_cols;
if (s.ok() && data_format == "NCDHW") {
auto dim = [&](char dimension) {
return c->Dim(input_shape, GetTensorDimIndex<3>(FORMAT_NCHW, dimension));
};
input_shape =
c->MakeShape({{dim('N'), dim('0'), dim('1'), dim('2'), dim('C')}});
stride_planes = strides[2];
stride_rows = strides[3];
stride_cols = strides[4];
    dilation_planes = dilations[2];
    dilation_rows = dilations[3];
    dilation_cols = dilations[4];
} else {
stride_planes = strides[1];
stride_rows = strides[2];
stride_cols = strides[3];
    dilation_planes = dilations[1];
    dilation_rows = dilations[2];
    dilation_cols = dilations[3];
}
DimensionHandle batch_size_dim = c->Dim(input_shape, 0);
DimensionHandle in_planes_dim = c->Dim(input_shape, 1);
DimensionHandle in_rows_dim = c->Dim(input_shape, 2);
DimensionHandle in_cols_dim = c->Dim(input_shape, 3);
DimensionHandle input_depth_dim = c->Dim(input_shape, 4);
DimensionHandle filter_planes_dim = c->Dim(filter_shape, 0);
DimensionHandle filter_rows_dim = c->Dim(filter_shape, 1);
DimensionHandle filter_cols_dim = c->Dim(filter_shape, 2);
DimensionHandle filter_input_depth_dim = c->Dim(filter_shape, 3);
DimensionHandle output_depth_dim = c->Dim(filter_shape, 4);
if (c->ValueKnown(input_depth_dim) && c->ValueKnown(filter_input_depth_dim)) {
int64_t input_depth_value = c->Value(input_depth_dim),
filter_input_depth_value = c->Value(filter_input_depth_dim);
if (filter_input_depth_value == 0)
return errors::InvalidArgument("Depth of filter must not be 0");
if (input_depth_value % filter_input_depth_value != 0)
return errors::InvalidArgument(
"Depth of input (", input_depth_value,
") is not a multiple of input depth of filter (",
filter_input_depth_value, ")");
if (input_depth_value != filter_input_depth_value) {
int64_t num_groups = input_depth_value / filter_input_depth_value;
if (c->ValueKnown(output_depth_dim)) {
int64_t output_depth_value = c->Value(output_depth_dim);
if (num_groups == 0)
return errors::InvalidArgument("Number of groups must not be 0");
if (output_depth_value % num_groups != 0)
return errors::InvalidArgument(
"Depth of output (", output_depth_value,
") is not a multiple of the number of groups (", num_groups, ")");
}
}
}
Padding padding;
TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding));
DimensionHandle output_planes, output_rows, output_cols;
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, in_planes_dim, filter_planes_dim, dilation_planes, stride_planes,
padding, -1, -1, &output_planes));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, in_rows_dim, filter_rows_dim, dilation_rows, stride_rows, padding, -1,
-1, &output_rows));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, in_cols_dim, filter_cols_dim, dilation_cols, stride_cols, padding, -1,
-1, &output_cols));
ShapeHandle output_shape;
if (data_format == "NCDHW") {
output_shape = c->MakeShape({batch_size_dim, output_depth_dim,
output_planes, output_rows, output_cols});
} else {
output_shape = c->MakeShape({batch_size_dim, output_planes, output_rows,
output_cols, output_depth_dim});
}
c->set_output(0, output_shape);
return absl::OkStatus();
}
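// Shape function for Conv2DBackpropInput. The input gradient shape comes from
// the input_sizes tensor (input 0), which may carry either the full 4-D shape
// or just the two spatial dimensions.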
Status Conv2DBackpropInputShape(shape_inference::InferenceContext* c) {
string data_format_str;
if (!c->GetAttr("data_format", &data_format_str).ok()) {
data_format_str = "NHWC";
}
TensorFormat data_format;
if (!FormatFromString(data_format_str, &data_format)) {
return errors::InvalidArgument("Invalid data format string: ",
data_format_str);
}
ShapeHandle output_grad_shape = c->input(2);
TF_RETURN_IF_ERROR(c->WithRank(output_grad_shape, 4, &output_grad_shape));
ShapeHandle filter_shape = c->input(1);
TF_RETURN_IF_ERROR(c->WithRank(filter_shape, 4, &filter_shape));
DimensionHandle batch_size_dim;
DimensionHandle output_grad_depth_dim;
absl::InlinedVector<DimensionHandle, 2> output_grad_spatial_dims(2);
TF_RETURN_IF_ERROR(DimensionsFromShape(
output_grad_shape, data_format, &batch_size_dim,
absl::MakeSpan(output_grad_spatial_dims), &output_grad_depth_dim, c));
DimensionHandle unused;
TF_RETURN_IF_ERROR(
c->Merge(output_grad_depth_dim, c->Dim(filter_shape, 3), &unused));
ShapeHandle specified_input_grad_shape;
TF_RETURN_IF_ERROR(
c->MakeShapeFromShapeTensor(0, &specified_input_grad_shape));
if (c->Rank(specified_input_grad_shape) == InferenceContext::kUnknownRank) {
TF_RETURN_IF_ERROR(c->WithRank(specified_input_grad_shape, 4,
&specified_input_grad_shape));
}
DimensionHandle input_grad_depth_dim;
absl::InlinedVector<DimensionHandle, 2> specified_input_grad_spatial_dims(2);
int specified_input_grad_rank = c->Rank(specified_input_grad_shape);
if (specified_input_grad_rank == 4) {
DimensionHandle specified_batch_size_dim;
TF_RETURN_IF_ERROR(DimensionsFromShape(
specified_input_grad_shape, data_format, &specified_batch_size_dim,
absl::MakeSpan(specified_input_grad_spatial_dims),
&input_grad_depth_dim, c));
TF_RETURN_IF_ERROR(
c->Merge(specified_batch_size_dim, batch_size_dim, &unused));
} else if (specified_input_grad_rank == 2) {
specified_input_grad_spatial_dims[0] =
c->Dim(specified_input_grad_shape, 0);
specified_input_grad_spatial_dims[1] =
c->Dim(specified_input_grad_shape, 1);
input_grad_depth_dim = c->Dim(filter_shape, 2);
} else {
return errors::InvalidArgument(
"Conv2DBackpropInput requires input_sizes to contain 4 values or 2 "
"values, but got: ",
specified_input_grad_rank);
}
ShapeHandle input_grad_shape;
TF_RETURN_IF_ERROR(ShapeFromDimensions(
batch_size_dim, specified_input_grad_spatial_dims, input_grad_depth_dim,
data_format, absl::nullopt, c, &input_grad_shape));
c->set_output(0, input_grad_shape);
return absl::OkStatus();
}
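// Shape function for Conv2DBackpropFilter with a fused bias gradient: output
// 0 is read from the filter_sizes tensor (input 1), and output 1 is a vector
// sized by the channel dimension of input 0.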
Status Conv2DBackpropFilterWithBiasShape(shape_inference::InferenceContext* c) {
ShapeHandle input_shape;
string data_format;
Status s = c->GetAttr("data_format", &data_format);
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input_shape));
if (s.ok() && data_format == "NCHW") {
c->set_output(1, c->Vector(c->Dim(input_shape, -3)));
} else {
c->set_output(1, c->Vector(c->Dim(input_shape, -1)));
}
ShapeHandle sh;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(1, &sh));
TF_RETURN_IF_ERROR(c->WithRank(sh, 4, &sh));
c->set_output(0, sh);
return absl::OkStatus();
}
namespace {
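// Shared shape function for DepthwiseConv2dNative. The filter is laid out as
// [rows, cols, in_channels, channel_multiplier], and the output channel count
// is in_channels * channel_multiplier.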
Status DepthwiseConv2DNativeShapeImpl(shape_inference::InferenceContext* c,
bool supports_explicit_padding) {
ShapeHandle input_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input_shape));
ShapeHandle filter_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 4, &filter_shape));
std::vector<int32> strides;
TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides));
if (strides.size() != 4) {
return errors::InvalidArgument(
"DepthwiseConv2D requires the stride attribute to contain 4 values, "
"but got: ",
strides.size());
}
std::vector<int32> dilations;
if (!c->GetAttr("dilations", &dilations).ok()) {
dilations.resize(4, 1);
}
if (dilations.size() != 4) {
return errors::InvalidArgument(
"DepthwiseConv2D requires the dilations attribute to contain 4 values, "
"but got: ",
dilations.size());
}
string data_format_str;
Status s = c->GetAttr("data_format", &data_format_str);
TensorFormat data_format;
if (!s.ok() || !FormatFromString(data_format_str, &data_format)) {
data_format = FORMAT_NHWC;
}
int32_t stride_rows;
int32_t stride_cols;
int32_t dilation_rows;
int32_t dilation_cols;
if (data_format == FORMAT_NCHW) {
input_shape =
c->MakeShape({{c->Dim(input_shape, 0), c->Dim(input_shape, 2),
c->Dim(input_shape, 3), c->Dim(input_shape, 1)}});
stride_rows = strides[2];
stride_cols = strides[3];
dilation_rows = dilations[2];
dilation_cols = dilations[3];
} else {
stride_rows = strides[1];
stride_cols = strides[2];
dilation_rows = dilations[1];
dilation_cols = dilations[2];
}
DimensionHandle batch_size_dim = c->Dim(input_shape, 0);
DimensionHandle in_rows_dim = c->Dim(input_shape, 1);
DimensionHandle in_cols_dim = c->Dim(input_shape, 2);
DimensionHandle filter_rows_dim = c->Dim(filter_shape, 0);
DimensionHandle filter_cols_dim = c->Dim(filter_shape, 1);
DimensionHandle input_depth = c->Dim(filter_shape, 2);
DimensionHandle depth_multiplier = c->Dim(filter_shape, 3);
TF_RETURN_IF_ERROR(
c->Merge(c->Dim(input_shape, 3), input_depth, &input_depth));
DimensionHandle output_depth;
TF_RETURN_IF_ERROR(c->Multiply(input_depth, depth_multiplier, &output_depth));
Padding padding;
TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding));
std::vector<int64_t> explicit_paddings;
if (supports_explicit_padding) {
Status status = c->GetAttr("explicit_paddings", &explicit_paddings);
if (!status.ok() && !errors::IsNotFound(status)) {
return status;
}
    TF_RETURN_IF_ERROR(CheckValidPadding(padding, explicit_paddings,
                                         /*num_dims=*/4, data_format));
} else {
DCHECK(padding != Padding::EXPLICIT);
}
DimensionHandle output_rows, output_cols;
int64_t pad_rows_before = -1, pad_rows_after = -1;
int64_t pad_cols_before = -1, pad_cols_after = -1;
if (padding == Padding::EXPLICIT) {
GetExplicitPaddingForDim(explicit_paddings, data_format, 'H',
&pad_rows_before, &pad_rows_after);
GetExplicitPaddingForDim(explicit_paddings, data_format, 'W',
&pad_cols_before, &pad_cols_after);
}
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, in_rows_dim, filter_rows_dim, dilation_rows, stride_rows, padding,
pad_rows_before, pad_rows_after, &output_rows));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, in_cols_dim, filter_cols_dim, dilation_cols, stride_cols, padding,
pad_cols_before, pad_cols_after, &output_cols));
ShapeHandle output_shape;
if (data_format == FORMAT_NCHW) {
output_shape =
c->MakeShape({batch_size_dim, output_depth, output_rows, output_cols});
} else {
output_shape =
c->MakeShape({batch_size_dim, output_rows, output_cols, output_depth});
}
c->set_output(0, output_shape);
return absl::OkStatus();
}
}  // namespace
Status DepthwiseConv2DNativeShape(shape_inference::InferenceContext* c) {
return DepthwiseConv2DNativeShapeImpl(c, false);
}
Status DepthwiseConv2DNativeShapeWithExplicitPadding(
shape_inference::InferenceContext* c) {
return DepthwiseConv2DNativeShapeImpl(c, true);
}
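// Shape function for AvgPool: ksize and strides must each contain four
// values, and the spatial output sizes follow the standard windowed-output
// computation for the requested padding.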
Status AvgPoolShape(shape_inference::InferenceContext* c) {
string data_format_str;
TensorFormat data_format;
Status s = c->GetAttr("data_format", &data_format_str);
if (s.ok()) {
FormatFromString(data_format_str, &data_format);
} else {
data_format = FORMAT_NHWC;
}
const int rank = (data_format == FORMAT_NCHW_VECT_C) ? 5 : 4;
ShapeHandle input_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), rank, &input_shape));
TF_RETURN_IF_ERROR(
CheckFormatConstraintsOnShape(data_format, input_shape, "input", c));
std::vector<int32> strides;
TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides));
if (strides.size() != 4) {
return errors::InvalidArgument(
"AvgPool requires the stride attribute to contain 4 values, but got: ",
strides.size());
}
std::vector<int32> kernel_sizes;
TF_RETURN_IF_ERROR(c->GetAttr("ksize", &kernel_sizes));
if (kernel_sizes.size() != 4) {
return errors::InvalidArgument(
"AvgPool requires the ksize attribute to contain 4 values, but got: ",
kernel_sizes.size());
}
int32_t stride_rows = GetTensorDim(strides, data_format, 'H');
int32_t stride_cols = GetTensorDim(strides, data_format, 'W');
int32_t kernel_rows = GetTensorDim(kernel_sizes, data_format, 'H');
int32_t kernel_cols = GetTensorDim(kernel_sizes, data_format, 'W');
constexpr int num_spatial_dims = 2;
DimensionHandle batch_size_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'N'));
DimensionHandle in_rows_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'H'));
DimensionHandle in_cols_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'W'));
DimensionHandle depth_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'C'));
Padding padding;
TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding));
DimensionHandle output_rows, output_cols;
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims(
c, in_rows_dim, kernel_rows, stride_rows, padding, &output_rows));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims(
c, in_cols_dim, kernel_cols, stride_cols, padding, &output_cols));
ShapeHandle output_shape;
TF_RETURN_IF_ERROR(MakeShapeFromFormat(data_format, batch_size_dim,
{output_rows, output_cols}, depth_dim,
&output_shape, c));
c->set_output(0, output_shape);
return absl::OkStatus();
}
Status AvgPoolGradShape(shape_inference::InferenceContext* c) {
ShapeHandle s;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(0, &s));
TF_RETURN_IF_ERROR(c->WithRank(s, 4, &s));
c->set_output(0, s);
return absl::OkStatus();
}
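// Shape function for FusedBatchNorm: output 0 matches x, and outputs 1-4 are
// vectors over the channel dimension, which is merged against the
// scale/offset (and, when used, mean/variance) inputs.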
Status FusedBatchNormShape(shape_inference::InferenceContext* c) {
string data_format_str;
TF_RETURN_IF_ERROR(c->GetAttr("data_format", &data_format_str));
TensorFormat data_format;
if (!FormatFromString(data_format_str, &data_format)) {
return errors::InvalidArgument("Invalid data format string: ",
data_format_str);
}
const int rank =
(data_format_str == "NDHWC" || data_format_str == "NCDHW") ? 5 : 4;
ShapeHandle x;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), rank, &x));
bool is_training;
TF_RETURN_IF_ERROR(c->GetAttr("is_training", &is_training));
float exponential_avg_factor;
if (!c->GetAttr("exponential_avg_factor", &exponential_avg_factor).ok()) {
exponential_avg_factor = 1.0f;
}
int number_inputs = (is_training && exponential_avg_factor == 1.0f) ? 3 : 5;
int channel_dim_index = GetTensorFeatureDimIndex(rank, data_format);
DimensionHandle channel_dim = c->Dim(x, channel_dim_index);
for (int i = 1; i < number_inputs; ++i) {
ShapeHandle vec;
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &vec));
TF_RETURN_IF_ERROR(c->Merge(channel_dim, c->Dim(vec, 0), &channel_dim));
}
ShapeHandle y;
TF_RETURN_IF_ERROR(c->ReplaceDim(x, channel_dim_index, channel_dim, &y));
c->set_output(0, y);
ShapeHandle vector_shape = c->Vector(channel_dim);
c->set_output(1, vector_shape);
c->set_output(2, vector_shape);
c->set_output(3, vector_shape);
c->set_output(4, vector_shape);
return absl::OkStatus();
}
Status FusedBatchNormV3Shape(shape_inference::InferenceContext* c) {
TF_RETURN_IF_ERROR(FusedBatchNormShape(c));
c->set_output(5, c->UnknownShape());
return absl::OkStatus();
}
Status FusedBatchNormExShape(shape_inference::InferenceContext* c) {
TF_RETURN_IF_ERROR(FusedBatchNormV3Shape(c));
string data_format_str;
TF_RETURN_IF_ERROR(c->GetAttr("data_format", &data_format_str));
TensorFormat data_format;
if (!FormatFromString(data_format_str, &data_format)) {
return errors::InvalidArgument("Invalid data format string: ",
data_format_str);
}
ShapeHandle x;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &x));
int channel_dim_index = GetTensorFeatureDimIndex(4, data_format);
DimensionHandle channel_dim = c->Dim(x, channel_dim_index);
if (c->ValueKnown(channel_dim) && c->Value(channel_dim) % 4 != 0) {
return errors::InvalidArgument(
"_FusedBatchNormEx channel dimension must be divisible by 4.");
}
return absl::OkStatus();
}
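// Shape function for FusedBatchNormGrad: x_backprop matches y_backprop and x,
// the scale and offset gradients are channel-sized vectors, and outputs 3-4
// are empty placeholder vectors.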
Status FusedBatchNormGradShape(shape_inference::InferenceContext* c) {
string data_format_str;
TF_RETURN_IF_ERROR(c->GetAttr("data_format", &data_format_str));
TensorFormat data_format;
if (!FormatFromString(data_format_str, &data_format)) {
return errors::InvalidArgument("Invalid data format string: ",
data_format_str);
}
const int rank =
(data_format_str == "NDHWC" || data_format_str == "NCDHW") ? 5 : 4;
ShapeHandle y_backprop;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), rank, &y_backprop));
ShapeHandle x;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), rank, &x));
bool is_training;
TF_RETURN_IF_ERROR(c->GetAttr("is_training", &is_training));
int channel_dim_index = GetTensorFeatureDimIndex(rank, data_format);
DimensionHandle channel_dim = c->Dim(y_backprop, channel_dim_index);
TF_RETURN_IF_ERROR(
c->Merge(channel_dim, c->Dim(x, channel_dim_index), &channel_dim));
for (int i = 2; i < 5; ++i) {
ShapeHandle vec;
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &vec));
TF_RETURN_IF_ERROR(c->Merge(channel_dim, c->Dim(vec, 0), &channel_dim));
}
ShapeHandle x_backprop;
TF_RETURN_IF_ERROR(
c->ReplaceDim(y_backprop, channel_dim_index, channel_dim, &x_backprop));
c->set_output(0, x_backprop);
c->set_output(1, c->Vector(channel_dim));
c->set_output(2, c->Vector(channel_dim));
c->set_output(3, c->Vector(0));
c->set_output(4, c->Vector(0));
return absl::OkStatus();
}
Status FusedBatchNormGradExShape(shape_inference::InferenceContext* c) {
TF_RETURN_IF_ERROR(FusedBatchNormGradShape(c));
int num_side_inputs;
TF_RETURN_IF_ERROR(c->GetAttr("num_side_inputs", &num_side_inputs));
if (num_side_inputs == 0) {
return absl::OkStatus();
}
string data_format_str;
TF_RETURN_IF_ERROR(c->GetAttr("data_format", &data_format_str));
TensorFormat data_format;
if (!FormatFromString(data_format_str, &data_format)) {
return errors::InvalidArgument("Invalid data format string: ",
data_format_str);
}
const int rank =
(data_format_str == "NDHWC" || data_format_str == "NCDHW") ? 5 : 4;
ShapeHandle y_backprop;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), rank, &y_backprop));
ShapeHandle x;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), rank, &x));
int channel_dim_index = GetTensorFeatureDimIndex(rank, data_format);
DimensionHandle channel_dim = c->Dim(y_backprop, channel_dim_index);
TF_RETURN_IF_ERROR(
c->Merge(channel_dim, c->Dim(x, channel_dim_index), &channel_dim));
ShapeHandle side_input_backprop;
TF_RETURN_IF_ERROR(c->ReplaceDim(y_backprop, channel_dim_index, channel_dim,
&side_input_backprop));
c->set_output(5, side_input_backprop);
return absl::OkStatus();
}
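// Reads the diagonal-index input, which is either a scalar (a single
// diagonal) or a vector of one or two elements giving the lower and upper
// diagonal offsets.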
Status ReadDiagIndex(InferenceContext* c, const Tensor* diag_index_tensor,
int32* lower_diag_index, int32* upper_diag_index) {
if (diag_index_tensor->dims() == 0) {
*lower_diag_index = diag_index_tensor->scalar<int32>()();
*upper_diag_index = *lower_diag_index;
} else {
int32_t num_elements = diag_index_tensor->dim_size(0);
if (num_elements == 1) {
*lower_diag_index = diag_index_tensor->vec<int32>()(0);
*upper_diag_index = *lower_diag_index;
} else if (num_elements == 2) {
*lower_diag_index = diag_index_tensor->vec<int32>()(0);
*upper_diag_index = diag_index_tensor->vec<int32>()(1);
} else {
return errors::InvalidArgument(
"diag_index must be a vector with one or two elements. It has ",
num_elements, " elements.");
}
}
return absl::OkStatus();
}
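// Shape function for MatrixDiagPartV2. When the requested band spans more
// than one diagonal, a num_diags dimension is inserted before the trailing
// max_diag_len dimension.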
Status MatrixDiagPartV2Shape(shape_inference::InferenceContext* c) {
ShapeHandle input_shape, diag_index_shape, unused_shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input_shape));
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(1), 1, &diag_index_shape));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused_shape));
const Tensor* diag_index_tensor = c->input_tensor(1);
if (!c->RankKnown(input_shape) || !c->FullyDefined(diag_index_shape) ||
diag_index_tensor == nullptr) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
int32_t lower_diag_index = 0;
int32_t upper_diag_index = 0;
TF_RETURN_IF_ERROR(ReadDiagIndex(c, diag_index_tensor, &lower_diag_index,
&upper_diag_index));
if (lower_diag_index > upper_diag_index) {
return errors::InvalidArgument(
"lower_diag_index is greater than upper_diag_index");
}
const int32_t input_rank = c->Rank(input_shape);
const int32_t num_rows = c->Value(c->Dim(input_shape, input_rank - 2));
const int32_t num_cols = c->Value(c->Dim(input_shape, input_rank - 1));
int32_t max_diag_len = InferenceContext::kUnknownDim;
if (num_rows != InferenceContext::kUnknownDim &&
num_cols != InferenceContext::kUnknownDim) {
if (lower_diag_index != 0 &&
(-num_rows >= lower_diag_index || lower_diag_index >= num_cols)) {
return errors::InvalidArgument("lower_diag_index is out of bound.");
}
if (upper_diag_index != 0 &&
(-num_rows >= upper_diag_index || upper_diag_index >= num_cols)) {
return errors::InvalidArgument("upper_diag_index is out of bound.");
}
max_diag_len = std::min(num_rows + std::min(upper_diag_index, 0),
num_cols - std::max(lower_diag_index, 0));
}
std::vector<DimensionHandle> dims;
dims.reserve(input_rank - 2);
for (int i = 0; i < input_rank - 2; ++i) {
dims.push_back(c->Dim(input_shape, i));
}
if (lower_diag_index < upper_diag_index) {
dims.push_back(c->MakeDim(upper_diag_index - lower_diag_index + 1));
}
dims.push_back(c->MakeDim(max_diag_len));
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
}
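// Shape function for MatrixDiagV2. Infers num_rows/num_cols from the diagonal
// length and band offsets when they are not supplied, and validates any
// supplied values against those minima.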
Status MatrixDiagV2Shape(shape_inference::InferenceContext* c) {
ShapeHandle input_shape, diag_index_shape, unused_shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &input_shape));
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(1), 1, &diag_index_shape));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused_shape));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused_shape));
TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 0, &unused_shape));
const Tensor* diag_index_tensor = c->input_tensor(1);
if (!c->RankKnown(input_shape) || !c->FullyDefined(diag_index_shape) ||
diag_index_tensor == nullptr) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
int32_t lower_diag_index = 0;
int32_t upper_diag_index = 0;
TF_RETURN_IF_ERROR(ReadDiagIndex(c, diag_index_tensor, &lower_diag_index,
&upper_diag_index));
if (lower_diag_index > upper_diag_index) {
return errors::InvalidArgument(
"lower_diag_index is greater than upper_diag_index");
}
const int32_t input_rank = c->Rank(input_shape);
if (lower_diag_index < upper_diag_index) {
const int32_t num_diags = c->Value(c->Dim(input_shape, input_rank - 2));
const int32_t other_dim = c->Value(c->Dim(input_shape, input_rank - 1));
if (num_diags != (upper_diag_index - lower_diag_index + 1)) {
return errors::InvalidArgument(
"The number of rows of `diagonal` doesn't match the number of "
"diagonals implied from `d_lower` and `d_upper`.\n",
"num_diags = ", num_diags, ", d_lower = ", lower_diag_index,
", d_upper = ", upper_diag_index, " ", input_rank, " ", other_dim);
}
}
const Tensor* num_rows_tensor = c->input_tensor(2);
const Tensor* num_cols_tensor = c->input_tensor(3);
int64_t num_rows = -1;
int64_t num_cols = -1;
if (num_rows_tensor != nullptr) {
TF_RETURN_IF_ERROR(c->GetScalarFromTensor(num_rows_tensor, &num_rows));
}
if (num_cols_tensor != nullptr) {
TF_RETURN_IF_ERROR(c->GetScalarFromTensor(num_cols_tensor, &num_cols));
}
const int32_t max_diag_len = c->Value(c->Dim(input_shape, input_rank - 1));
const int32_t min_num_rows = max_diag_len - std::min(upper_diag_index, 0);
const int32_t min_num_cols = max_diag_len + std::max(lower_diag_index, 0);
if (num_rows == -1 && num_cols == -1) {
num_rows = std::max(min_num_rows, min_num_cols);
num_cols = num_rows;
}
if (num_rows == -1) {
num_rows = min_num_rows;
} else if (num_rows < min_num_rows) {
return errors::InvalidArgument("num_rows is too small");
}
if (num_cols == -1) {
num_cols = min_num_cols;
} else if (num_cols < min_num_cols) {
return errors::InvalidArgument("num_cols is too small.");
}
if (num_rows != min_num_rows && num_cols != min_num_cols) {
return errors::InvalidArgument(
"num_rows and num_cols are not consistent with lower_diag_index, "
"upper_diag_index, and the length of the given diagonals.\n",
"num_rows = ", num_rows, " != min_num_rows = ", min_num_rows,
", num_cols = ", num_cols, " != min_num_cols = ", min_num_cols);
}
ShapeHandle output_shape;
const DimensionHandle output_row_dim = c->MakeDim(num_rows);
const DimensionHandle output_col_dim = c->MakeDim(num_cols);
if (lower_diag_index == upper_diag_index) {
TF_RETURN_IF_ERROR(c->ReplaceDim(input_shape, input_rank - 1,
output_row_dim, &output_shape));
TF_RETURN_IF_ERROR(
c->Concatenate(output_shape, c->Vector(output_col_dim), &output_shape));
} else {
TF_RETURN_IF_ERROR(c->ReplaceDim(input_shape, input_rank - 2,
output_row_dim, &output_shape));
TF_RETURN_IF_ERROR(c->ReplaceDim(output_shape, input_rank - 1,
output_col_dim, &output_shape));
}
c->set_output(0, output_shape);
return absl::OkStatus();
}
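// Shape function for MatrixSetDiagV2: the output takes the input's shape,
// refined against the diagonal operand when the input shape is not fully
// defined.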
Status MatrixSetDiagV2Shape(shape_inference::InferenceContext* c) {
ShapeHandle input_shape, diag_shape, diag_index_shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input_shape));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 1, &diag_shape));
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(2), 1, &diag_index_shape));
int32_t lower_diag_index = 0;
int32_t upper_diag_index = 0;
bool diag_index_known = false;
const Tensor* diag_index_tensor = c->input_tensor(2);
if (diag_index_tensor != nullptr && c->FullyDefined(diag_index_shape)) {
diag_index_known = true;
TF_RETURN_IF_ERROR(ReadDiagIndex(c, diag_index_tensor, &lower_diag_index,
&upper_diag_index));
if (lower_diag_index > upper_diag_index) {
return errors::InvalidArgument(
"lower_diag_index is greater than upper_diag_index");
}
}
if (c->RankKnown(input_shape)) {
int32_t input_rank = c->Rank(input_shape);
if (diag_index_known) {
TF_RETURN_IF_ERROR(c->WithRank(
c->input(1),
(lower_diag_index == upper_diag_index) ? input_rank - 1 : input_rank,
&diag_shape));
} else {
TF_RETURN_IF_ERROR(
c->WithRankAtLeast(c->input(1), input_rank - 1, &diag_shape));
TF_RETURN_IF_ERROR(
c->WithRankAtMost(c->input(1), input_rank, &diag_shape));
}
const int32_t num_rows = c->Value(c->Dim(input_shape, input_rank - 2));
const int32_t num_cols = c->Value(c->Dim(input_shape, input_rank - 1));
if (num_rows != InferenceContext::kUnknownDim &&
num_cols != InferenceContext::kUnknownDim) {
if (lower_diag_index != 0 &&
(-num_rows >= lower_diag_index || lower_diag_index >= num_cols)) {
return errors::InvalidArgument("lower_diag_index is out of bound.");
}
if (upper_diag_index != 0 &&
(-num_rows >= upper_diag_index || upper_diag_index >= num_cols)) {
return errors::InvalidArgument("upper_diag_index is out of bound.");
}
}
}
ShapeHandle output_shape = input_shape;
if (c->RankKnown(diag_shape) && !c->FullyDefined(input_shape)) {
ShapeHandle diag_prefix;
TF_RETURN_IF_ERROR(c->Subshape(
diag_shape, 0, (lower_diag_index == upper_diag_index) ? -1 : -2,
&diag_prefix));
TF_RETURN_IF_ERROR(
c->Concatenate(diag_prefix, c->UnknownShapeOfRank(2), &diag_shape));
TF_RETURN_IF_ERROR(c->Merge(input_shape, diag_shape, &output_shape));
}
c->set_output(0, output_shape);
return absl::OkStatus();
}
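// Shared shape function for MaxPool, optionally with explicit padding. Inputs
// are rank 5 under FORMAT_NCHW_VECT_C, and pooling over the channel dimension
// is supported via the 'C' entries of ksize/strides.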
Status MaxPoolShapeImpl(shape_inference::InferenceContext* c,
bool supports_explicit_padding) {
string data_format_str;
TensorFormat data_format;
Status s = c->GetAttr("data_format", &data_format_str);
if (s.ok()) {
FormatFromString(data_format_str, &data_format);
} else {
data_format = FORMAT_NHWC;
}
const int rank = (data_format == FORMAT_NCHW_VECT_C) ? 5 : 4;
ShapeHandle input_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), rank, &input_shape));
TF_RETURN_IF_ERROR(
CheckFormatConstraintsOnShape(data_format, input_shape, "input", c));
std::vector<int32> strides;
TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides));
if (strides.size() != 4) {
return errors::InvalidArgument(
"MaxPool requires the stride attribute to contain 4 values, but got: ",
strides.size());
}
std::vector<int32> kernel_sizes;
TF_RETURN_IF_ERROR(c->GetAttr("ksize", &kernel_sizes));
if (kernel_sizes.size() != 4) {
return errors::InvalidArgument(
"MaxPool requires the ksize attribute to contain 4 values, but got: ",
kernel_sizes.size());
}
int32_t stride_depth = GetTensorDim(strides, data_format, 'C');
int32_t stride_rows = GetTensorDim(strides, data_format, 'H');
int32_t stride_cols = GetTensorDim(strides, data_format, 'W');
int32_t kernel_depth = GetTensorDim(kernel_sizes, data_format, 'C');
int32_t kernel_rows = GetTensorDim(kernel_sizes, data_format, 'H');
int32_t kernel_cols = GetTensorDim(kernel_sizes, data_format, 'W');
constexpr int num_spatial_dims = 2;
DimensionHandle batch_size_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'N'));
DimensionHandle in_rows_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'H'));
DimensionHandle in_cols_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'W'));
DimensionHandle in_depth_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'C'));
Padding padding;
TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding));
std::vector<int64_t> explicit_paddings;
if (supports_explicit_padding) {
Status status = c->GetAttr("explicit_paddings", &explicit_paddings);
if (!status.ok() && !errors::IsNotFound(status)) {
return status;
}
    TF_RETURN_IF_ERROR(CheckValidPadding(padding, explicit_paddings,
                                         /*num_dims=*/4, data_format));
} else {
DCHECK(padding != Padding::EXPLICIT);
}
ShapeHandle output_shape;
DimensionHandle output_rows, output_cols, output_depth;
int64_t pad_rows_before = -1, pad_rows_after = -1;
int64_t pad_cols_before = -1, pad_cols_after = -1;
if (padding == Padding::EXPLICIT) {
GetExplicitPaddingForDim(explicit_paddings, data_format, 'H',
&pad_rows_before, &pad_rows_after);
GetExplicitPaddingForDim(explicit_paddings, data_format, 'W',
&pad_cols_before, &pad_cols_after);
}
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, in_rows_dim, kernel_rows, 1, stride_rows, padding,
pad_rows_before, pad_rows_after, &output_rows));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, in_cols_dim, kernel_cols, 1, stride_cols, padding,
pad_cols_before, pad_cols_after, &output_cols));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, in_depth_dim, kernel_depth, 1, stride_depth, padding,
0, 0, &output_depth));
TF_RETURN_IF_ERROR(MakeShapeFromFormat(data_format, batch_size_dim,
{output_rows, output_cols},
output_depth, &output_shape, c));
c->set_output(0, output_shape);
return absl::OkStatus();
}
Status MaxPoolShape(shape_inference::InferenceContext* c) {
return MaxPoolShapeImpl(c, false);
}
Status MaxPoolGradShape(shape_inference::InferenceContext* c) {
return UnchangedShapeWithRank(c, 4);
}
Status MaxPoolShapeWithExplicitPadding(shape_inference::InferenceContext* c) {
return MaxPoolShapeImpl(c, true);
}
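// Shape function for MaxPoolV2: ksize and strides arrive either as attributes
// or as the last two inputs; if those input tensors are not statically known,
// the output shape is unknown.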
Status MaxPoolV2Shape(shape_inference::InferenceContext* c, int num_inputs) {
string data_format_str;
TensorFormat data_format;
Status s = c->GetAttr("data_format", &data_format_str);
if (s.ok()) {
FormatFromString(data_format_str, &data_format);
} else {
data_format = FORMAT_NHWC;
}
const int rank = (data_format == FORMAT_NCHW_VECT_C) ? 5 : 4;
ShapeHandle input_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), rank, &input_shape));
TF_RETURN_IF_ERROR(
CheckFormatConstraintsOnShape(data_format, input_shape, "input", c));
std::vector<int32> kernel_sizes;
std::vector<int32> strides;
if (c->num_inputs() + 2 == num_inputs) {
TF_RETURN_IF_ERROR(c->GetAttr("ksize", &kernel_sizes));
TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides));
} else {
ShapeHandle size;
DimensionHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(c->num_inputs() - 2), 1, &size));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(size, 0), 4, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(c->num_inputs() - 1), 1, &size));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(size, 0), 4, &unused));
const Tensor* kernel_sizes_tensor = c->input_tensor(c->num_inputs() - 2);
if (kernel_sizes_tensor == nullptr) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
kernel_sizes.resize(kernel_sizes_tensor->shape().num_elements());
auto kernel_sizes_vec = kernel_sizes_tensor->flat<int32>();
std::copy_n(&kernel_sizes_vec(0), kernel_sizes.size(),
kernel_sizes.begin());
const Tensor* strides_tensor = c->input_tensor(c->num_inputs() - 1);
if (strides_tensor == nullptr) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
strides.resize(strides_tensor->shape().num_elements());
auto strides_vec = strides_tensor->flat<int32>();
std::copy_n(&strides_vec(0), strides.size(), strides.begin());
}
if (strides.size() != 4) {
return errors::InvalidArgument(
"MaxPool requires the stride attribute to contain 4 values, but "
"got: ",
strides.size());
}
if (kernel_sizes.size() != 4) {
return errors::InvalidArgument(
"MaxPool requires the ksize attribute to contain 4 values, but got: ",
kernel_sizes.size());
}
int32_t stride_depth = GetTensorDim(strides, data_format, 'C');
int32_t stride_rows = GetTensorDim(strides, data_format, 'H');
int32_t stride_cols = GetTensorDim(strides, data_format, 'W');
int32_t kernel_depth = GetTensorDim(kernel_sizes, data_format, 'C');
int32_t kernel_rows = GetTensorDim(kernel_sizes, data_format, 'H');
int32_t kernel_cols = GetTensorDim(kernel_sizes, data_format, 'W');
constexpr int num_spatial_dims = 2;
DimensionHandle batch_size_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'N'));
DimensionHandle in_rows_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'H'));
DimensionHandle in_cols_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'W'));
DimensionHandle in_depth_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'C'));
Padding padding;
TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding));
ShapeHandle output_shape;
DimensionHandle output_rows, output_cols, output_depth;
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims(
c, in_rows_dim, kernel_rows, stride_rows, padding, &output_rows));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims(
c, in_cols_dim, kernel_cols, stride_cols, padding, &output_cols));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims(
c, in_depth_dim, kernel_depth, stride_depth, padding, &output_depth));
TF_RETURN_IF_ERROR(MakeShapeFromFormat(data_format, batch_size_dim,
{output_rows, output_cols},
output_depth, &output_shape, c));
c->set_output(0, output_shape);
return absl::OkStatus();
}
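// Shared shape function for 3D pooling ops; NCDHW inputs are normalized to
// NDHWC before the windowed output-size computation.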
Status Pool3DShape(shape_inference::InferenceContext* c) {
ShapeHandle input_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 5, &input_shape));
string data_format;
Status s = c->GetAttr("data_format", &data_format);
std::vector<int32> strides;
TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides));
if (strides.size() != 5) {
return errors::InvalidArgument(
"Pool3D ops require the stride attribute to contain 5 values, but "
"got: ",
strides.size());
}
std::vector<int32> kernel_sizes;
TF_RETURN_IF_ERROR(c->GetAttr("ksize", &kernel_sizes));
if (kernel_sizes.size() != 5) {
return errors::InvalidArgument(
"Pool3D requires the ksize attribute to contain 5 values, but got: ",
kernel_sizes.size());
}
int32_t stride_planes, stride_rows, stride_cols;
int32_t kernel_planes, kernel_rows, kernel_cols;
if (s.ok() && data_format == "NCDHW") {
auto dim = [&](char dimension) {
return c->Dim(input_shape, GetTensorDimIndex<3>(FORMAT_NCHW, dimension));
};
input_shape =
c->MakeShape({{dim('N'), dim('0'), dim('1'), dim('2'), dim('C')}});
stride_planes = strides[2];
stride_rows = strides[3];
stride_cols = strides[4];
kernel_planes = kernel_sizes[2];
kernel_rows = kernel_sizes[3];
kernel_cols = kernel_sizes[4];
} else {
stride_planes = strides[1];
stride_rows = strides[2];
stride_cols = strides[3];
kernel_planes = kernel_sizes[1];
kernel_rows = kernel_sizes[2];
kernel_cols = kernel_sizes[3];
}
DimensionHandle batch_size_dim = c->Dim(input_shape, 0);
DimensionHandle in_planes_dim = c->Dim(input_shape, 1);
DimensionHandle in_rows_dim = c->Dim(input_shape, 2);
DimensionHandle in_cols_dim = c->Dim(input_shape, 3);
DimensionHandle output_depth_dim = c->Dim(input_shape, 4);
Padding padding;
TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding));
DimensionHandle output_planes, output_rows, output_cols;
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims(
c, in_planes_dim, kernel_planes, stride_planes, padding, &output_planes));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims(
c, in_rows_dim, kernel_rows, stride_rows, padding, &output_rows));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims(
c, in_cols_dim, kernel_cols, stride_cols, padding, &output_cols));
ShapeHandle output_shape;
if (data_format == "NCDHW") {
output_shape = c->MakeShape({batch_size_dim, output_depth_dim,
output_planes, output_rows, output_cols});
} else {
output_shape = c->MakeShape({batch_size_dim, output_planes, output_rows,
output_cols, output_depth_dim});
}
c->set_output(0, output_shape);
return absl::OkStatus();
}
Status MaxPool3DGradShape(shape_inference::InferenceContext* c) {
return UnchangedShapeWithRank(c, 5);
}
Status AvgPool3DGradShape(shape_inference::InferenceContext* c) {
ShapeHandle s;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(0, &s));
TF_RETURN_IF_ERROR(c->WithRank(s, 5, &s));
c->set_output(0, s);
return absl::OkStatus();
}
Status UnknownShape(shape_inference::InferenceContext* c) {
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, c->UnknownShape());
}
return absl::OkStatus();
}
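// Collects the reduction axes from the reduction_indices tensor, wrapping
// negative indices and rejecting anything outside [-input_rank, input_rank).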
template <typename T>
Status ReductionShapeHelper(const Tensor* reduction_indices_t,
const int32_t input_rank,
std::set<int64_t>* true_indices) {
auto reduction_indices = reduction_indices_t->flat<T>();
for (int i = 0; i < reduction_indices_t->NumElements(); ++i) {
const T reduction_index = reduction_indices(i);
if (reduction_index < -input_rank || reduction_index >= input_rank) {
return errors::InvalidArgument("Invalid reduction dimension ",
reduction_index, " for input with ",
input_rank, " dimensions.");
}
auto wrapped_index = reduction_index;
if (wrapped_index < 0) {
wrapped_index += input_rank;
}
true_indices->insert(wrapped_index);
}
return absl::OkStatus();
}
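// Shape function for reduction ops: each reduced dimension is dropped, or
// kept with size 1 when keep_dims is set.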
Status ReductionShape(InferenceContext* c) {
ShapeHandle input = c->input(0);
ShapeHandle indices;
if (c->graph_def_version() < 21) {
indices = c->input(1);
} else {
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(1), 1, &indices));
}
bool keep_dims;
TF_RETURN_IF_ERROR(c->GetAttr("keep_dims", &keep_dims));
const Tensor* reduction_indices_t = c->input_tensor(1);
if (reduction_indices_t == nullptr || !c->RankKnown(input)) {
if (keep_dims && c->RankKnown(input)) {
c->set_output(0, c->UnknownShapeOfRank(c->Rank(input)));
return absl::OkStatus();
} else {
return shape_inference::UnknownShape(c);
}
}
const int32_t input_rank = c->Rank(input);
std::set<int64_t> true_indices;
if (reduction_indices_t->dtype() == DataType::DT_INT32) {
TF_RETURN_IF_ERROR(ReductionShapeHelper<int32>(reduction_indices_t,
input_rank, &true_indices));
} else if (reduction_indices_t->dtype() == DataType::DT_INT64) {
TF_RETURN_IF_ERROR(ReductionShapeHelper<int64_t>(
reduction_indices_t, input_rank, &true_indices));
} else {
return errors::InvalidArgument(
"reduction_indices can only be int32 or int64");
}
std::vector<DimensionHandle> dims;
for (int i = 0; i < input_rank; ++i) {
if (true_indices.count(i) > 0) {
if (keep_dims) {
dims.emplace_back(c->MakeDim(1));
}
} else {
dims.emplace_back(c->Dim(input, i));
}
}
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
}
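// Shared shape function for Concat/ConcatV2: non-concat dimensions are merged
// across all inputs and the concat dimension is the sum of the input sizes.
// A negative concat_dim counts from the innermost dimension.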
Status ConcatShapeHelper(InferenceContext* c, int start_value_index,
int end_value_index, int dim_index) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(dim_index), 0, &unused));
const Tensor* concat_dim_t = c->input_tensor(dim_index);
if (concat_dim_t == nullptr) {
int32_t rank = InferenceContext::kUnknownRank;
for (int i = start_value_index; i < end_value_index; ++i) {
if (rank == InferenceContext::kUnknownRank) rank = c->Rank(c->input(i));
if (rank != InferenceContext::kUnknownRank) {
break;
}
}
if (rank == InferenceContext::kUnknownRank) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
} else if (rank == 0) {
return errors::InvalidArgument(
"Can't concatenate scalars (use tf.stack instead)");
} else {
for (int i = start_value_index; i < end_value_index; ++i) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), rank, &unused));
}
}
std::vector<DimensionHandle> dims;
dims.reserve(rank);
for (int i = 0; i < rank; ++i) dims.push_back(c->UnknownDim());
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
}
int64_t concat_dim;
if (concat_dim_t->dtype() == DT_INT32) {
concat_dim = static_cast<int64_t>(concat_dim_t->flat<int32>()(0));
} else {
concat_dim = concat_dim_t->flat<int64_t>()(0);
}
  const int64_t min_rank = concat_dim < 0 ? -concat_dim : concat_dim + 1;
ShapeHandle output_before;
ShapeHandle output_after;
ShapeHandle input = c->input(end_value_index - 1);
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, min_rank, &input));
TF_RETURN_IF_ERROR(c->Subshape(input, 0, concat_dim, &output_before));
DimensionHandle output_middle = c->Dim(input, concat_dim);
if (concat_dim == -1) {
output_after = c->Scalar();
} else {
TF_RETURN_IF_ERROR(c->Subshape(input, concat_dim + 1, &output_after));
}
for (int i = end_value_index - 2; i >= start_value_index; --i) {
ShapeHandle before;
ShapeHandle after;
input = c->input(i);
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, min_rank, &input));
TF_RETURN_IF_ERROR(c->Subshape(input, 0, concat_dim, &before));
DimensionHandle middle = c->Dim(input, concat_dim);
if (concat_dim == -1) {
after = c->Scalar();
} else {
TF_RETURN_IF_ERROR(c->Subshape(input, concat_dim + 1, &after));
}
TF_RETURN_IF_ERROR(c->Merge(before, output_before, &output_before));
TF_RETURN_IF_ERROR(c->Add(output_middle, middle, &output_middle));
TF_RETURN_IF_ERROR(c->Merge(after, output_after, &output_after));
}
ShapeHandle s;
TF_RETURN_IF_ERROR(
c->Concatenate(output_before, c->Vector(output_middle), &s));
TF_RETURN_IF_ERROR(c->Concatenate(s, output_after, &s));
c->set_output(0, s);
return absl::OkStatus();
}
Status ConcatShape(InferenceContext* c, int num_inputs_to_concat) {
  return ConcatShapeHelper(c, /*start_value_index=*/1,
                           /*end_value_index=*/1 + num_inputs_to_concat,
                           /*dim_index=*/0);
}
Status ConcatV2Shape(InferenceContext* c) {
  return ConcatShapeHelper(c, /*start_value_index=*/0,
                           /*end_value_index=*/c->num_inputs() - 1,
                           /*dim_index=*/c->num_inputs() - 1);
}
Status QuantizedConcatV2Shape(InferenceContext* c, int num_inputs_to_concat) {
  return ConcatShapeHelper(c, /*start_value_index=*/0,
                           /*end_value_index=*/num_inputs_to_concat,
                           /*dim_index=*/num_inputs_to_concat);
}
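// Computes the NumPy-style broadcast of two shapes. When
// incompatible_shape_error is false, mismatches yield an unknown (or scalar)
// shape instead of an error.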
Status BroadcastBinaryOpOutputShapeFnHelper(InferenceContext* c,
ShapeHandle shape_x,
ShapeHandle shape_y,
bool incompatible_shape_error,
ShapeHandle* out) {
CHECK_NOTNULL(out);
if (!c->RankKnown(shape_x) || !c->RankKnown(shape_y)) {
*out = c->UnknownShape();
return absl::OkStatus();
}
const int32_t rank_x = c->Rank(shape_x);
const int32_t rank_y = c->Rank(shape_y);
const int32_t rank_out = std::max(rank_x, rank_y);
std::vector<DimensionHandle> dims;
DimensionHandle dim_one;
if (rank_x != rank_y) dim_one = c->MakeDim(1);
for (int i = 0; i < rank_out; ++i) {
const auto dim_x = i < (rank_out - rank_x)
? dim_one
: c->Dim(shape_x, i - (rank_out - rank_x));
const bool dim_y_is_one = (i < (rank_out - rank_y));
const auto dim_y =
dim_y_is_one ? dim_one : c->Dim(shape_y, i - (rank_out - rank_y));
if (!c->ValueKnown(dim_x) || !c->ValueKnown(dim_y)) {
if (c->Value(dim_x) > 1) {
if (!incompatible_shape_error) {
*out = c->UnknownShape();
return absl::OkStatus();
}
dims.push_back(dim_x);
} else if (c->Value(dim_y) > 1) {
if (!incompatible_shape_error) {
*out = c->UnknownShape();
return absl::OkStatus();
}
dims.push_back(dim_y);
} else if (c->Value(dim_x) == 1) {
dims.push_back(dim_y);
} else if (c->Value(dim_y) == 1) {
dims.push_back(dim_x);
} else if (dim_y.SameHandle(dim_x)) {
dims.push_back(dim_x);
} else if (!c->ValueKnown(dim_x) && !c->ValueKnown(dim_y)) {
dims.push_back(c->UnknownDim());
} else {
if (!incompatible_shape_error) {
*out = c->UnknownShape();
return absl::OkStatus();
}
dims.push_back(c->UnknownDim());
}
} else if (c->Value(dim_x) == 1 || c->Value(dim_y) == 1) {
if (c->Value(dim_x) == 1 && !dim_y_is_one) {
dims.push_back(dim_y);
} else {
DCHECK_EQ(c->Value(dim_y), 1);
dims.push_back(dim_x);
}
} else {
DimensionHandle dim;
Status s = c->Merge(dim_x, dim_y, &dim);
if (!s.ok()) {
if (!incompatible_shape_error) {
*out = c->MakeShape({});
return absl::OkStatus();
}
return s;
}
dims.push_back(dim);
}
}
*out = c->MakeShape(dims);
return absl::OkStatus();
}
Status RandomShape(shape_inference::InferenceContext* c) {
shape_inference::ShapeHandle out;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(0, &out));
c->set_output(0, out);
return absl::OkStatus();
}
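// Shape function for segment reductions with an explicit num_segments scalar:
// the output is [num_segments] followed by the data dimensions that remain
// after the segment_ids prefix.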
Status SegmentReductionWithNumSegmentsShapeFn(InferenceContext* c) {
ShapeHandle s_data = c->input(0);
ShapeHandle s_segment_ids = c->input(1);
ShapeHandle s_num_segments = c->input(2);
TF_RETURN_IF_ERROR(c->WithRank(s_num_segments, 0, &s_num_segments));
ShapeHandle out;
if (c->RankKnown(s_segment_ids)) {
TF_RETURN_IF_ERROR(
c->MergePrefix(s_data, s_segment_ids, &s_data, &s_segment_ids));
DimensionHandle num_segments_dim;
TF_RETURN_IF_ERROR(c->MakeDimForScalarInput(2, &num_segments_dim));
ShapeHandle s_data_suffix;
TF_RETURN_IF_ERROR(
c->Subshape(s_data, c->Rank(s_segment_ids), &s_data_suffix));
TF_RETURN_IF_ERROR(
c->Concatenate(c->Vector(num_segments_dim), s_data_suffix, &out));
} else {
out = c->UnknownShape();
}
c->set_output(0, out);
return absl::OkStatus();
}
namespace {
template <typename T>
Status SliceHelper(InferenceContext* c, ShapeHandle begin_value,
const Tensor* sizes_value,
std::vector<DimensionHandle>* dims) {
auto sizes_vec = sizes_value->vec<T>();
for (int i = 0; i < sizes_value->NumElements(); ++i) {
DimensionHandle dim = c->Dim(c->input(0), i);
if (sizes_vec(i) != -1) {
auto dim_val = c->Value(dim);
if (sizes_vec(i) < 0) {
return errors::InvalidArgument(
"Out of bounds slicing on dimension ", i, " of length ", dim_val,
": sizes vector cannot be < -1, but was ", sizes_vec(i));
}
dims->emplace_back(c->MakeDim(sizes_vec(i)));
} else {
DimensionHandle result;
TF_RETURN_IF_ERROR(c->Subtract(dim, c->Dim(begin_value, i), &result));
dims->emplace_back(result);
}
}
return absl::OkStatus();
}
}  // namespace
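// Shape function for Slice. A size of -1 means "to the end of the dimension";
// when the sizes tensor's values are unavailable, a partially known sizes
// shape still pins down the output dimensions that are known.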
Status SliceShape(InferenceContext* c) {
ShapeHandle input = c->input(0);
ShapeHandle begin_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &begin_shape));
ShapeHandle sizes_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &sizes_shape));
TF_RETURN_IF_ERROR(c->Merge(begin_shape, sizes_shape, &begin_shape));
DimensionHandle ndims = c->Dim(begin_shape, 0);
if (c->ValueKnown(ndims)) {
TF_RETURN_IF_ERROR(c->WithRank(input, c->Value(ndims), &input));
}
ShapeHandle begin_value;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(1, &begin_value));
const Tensor* sizes_value = c->input_tensor(2);
if (sizes_value != nullptr) {
TF_RETURN_IF_ERROR(
c->WithRank(begin_value, sizes_value->NumElements(), &begin_value));
std::vector<DimensionHandle> dims;
if (sizes_value->dtype() == DT_INT64) {
TF_RETURN_IF_ERROR(
SliceHelper<int64_t>(c, begin_value, sizes_value, &dims));
} else {
TF_RETURN_IF_ERROR(
SliceHelper<int32>(c, begin_value, sizes_value, &dims));
}
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
} else {
ShapeHandle sizes_value;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(2, &sizes_value));
if (c->RankKnown(sizes_value)) {
TF_RETURN_IF_ERROR(
c->WithRank(begin_value, c->Rank(sizes_value), &begin_value));
std::vector<DimensionHandle> dims;
dims.reserve(c->Rank(sizes_value));
for (int i = 0; i < c->Rank(sizes_value); ++i) {
dims.emplace_back(c->Dim(sizes_value, i));
}
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
}
if (c->RankKnown(input)) {
c->set_output(0, c->UnknownShapeOfRank(c->Rank(input)));
return absl::OkStatus();
} else {
return shape_inference::UnknownShape(c);
}
}
return absl::OkStatus();
}
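// Validates the three components of a sparse tensor: indices must be an
// [N, rank] matrix, values a length-N vector, and shape a length-rank vector.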
Status ValidateSparseTensor(InferenceContext* c, ShapeHandle indices_shape,
ShapeHandle values_shape, ShapeHandle shape_shape) {
ShapeHandle unused_shape;
TF_RETURN_IF_ERROR(c->WithRank(indices_shape, 2, &unused_shape));
TF_RETURN_IF_ERROR(c->WithRank(values_shape, 1, &unused_shape));
TF_RETURN_IF_ERROR(c->WithRank(shape_shape, 1, &unused_shape));
DimensionHandle num_index_elements_dim = c->Dim(indices_shape, 0);
if (c->ValueKnown(num_index_elements_dim)) {
DimensionHandle num_values_elements_dim = c->Dim(values_shape, 0);
if (c->ValueKnown(num_values_elements_dim)) {
int64_t num_index_elements = c->Value(num_index_elements_dim);
int64_t num_values_elements = c->Value(num_values_elements_dim);
if (num_index_elements != num_values_elements) {
return errors::InvalidArgument("Number of elements in index (",
num_index_elements, ") and values (",
num_values_elements, ") do not match.");
}
}
}
DimensionHandle index_rank_dim = c->Dim(indices_shape, 1);
if (c->ValueKnown(index_rank_dim)) {
DimensionHandle shape_rank_dim = c->Dim(shape_shape, 0);
if (c->ValueKnown(shape_rank_dim)) {
int64_t index_rank = c->Value(index_rank_dim);
int32_t shape_rank = c->Value(shape_rank_dim);
if (index_rank != shape_rank) {
return errors::InvalidArgument("Index rank (", index_rank,
") and shape rank (", shape_rank,
") do not match.");
}
}
}
return absl::OkStatus();
}
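// Resolves the shape and dtype recorded on a resource-variable handle,
// checking that the handle's dtype matches the op's "dtype" attribute.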
Status ValidateVariableResourceHandle(
InferenceContext* c, std::vector<ShapeAndType>* shape_and_type) {
auto* handle_data = c->input_handle_shapes_and_types(0);
if (handle_data == nullptr || handle_data->empty()) {
shape_and_type->emplace_back(c->UnknownShape(), DT_INVALID);
} else {
*shape_and_type = *handle_data;
DataType value_dtype;
TF_RETURN_IF_ERROR(c->GetAttr("dtype", &value_dtype));
if (shape_and_type->at(0).dtype != value_dtype) {
return errors::InvalidArgument(
"Trying to read variable with wrong dtype. "
"Expected ",
DataTypeString(shape_and_type->at(0).dtype), " got ",
DataTypeString(value_dtype));
}
}
return absl::OkStatus();
}
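// Shape function for GatherNd: with index depth R = indices.shape[-1], the
// output is indices.shape[:-1] + params.shape[R:], and R must not exceed the
// rank of params.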
Status GatherNdShape(InferenceContext* c) {
ShapeHandle params;
std::vector<ShapeAndType> handle_shape_and_type;
if (c->input_handle_shapes_and_types(0) != nullptr) {
TF_RETURN_IF_ERROR(
ValidateVariableResourceHandle(c, &handle_shape_and_type));
params = handle_shape_and_type[0].shape;
} else {
params = c->input(0);
}
ShapeHandle indices;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 1, &indices));
DimensionHandle r_dim = c->Dim(indices, -1);
if (!c->RankKnown(params) || !c->ValueKnown(r_dim)) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
if (c->Value(r_dim) > c->Rank(params)) {
return errors::InvalidArgument(
"indices.shape[-1] must be <= params.rank, but saw indices shape: ",
c->DebugString(indices), " and params shape: ", c->DebugString(params));
}
ShapeHandle indices_slice;
ShapeHandle params_slice;
TF_RETURN_IF_ERROR(c->Subshape(indices, 0, -1, &indices_slice));
  TF_RETURN_IF_ERROR(c->Subshape(params, c->Value(r_dim), &params_slice));
ShapeHandle out;
TF_RETURN_IF_ERROR(c->Concatenate(indices_slice, params_slice, &out));
c->set_output(0, out);
return absl::OkStatus();
}
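// Shared validation for scatter_nd-style ops: the outer dimensions of indices
// and updates must match, and the trailing dimensions of updates must match
// the input slices addressed by the index depth.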
Status ScatterNdShapeHelper(InferenceContext* c, ShapeHandle indices_shape,
ShapeHandle updates_shape,
ShapeHandle input_shape) {
if (c->Value(c->NumElements(input_shape)) == 0 &&
(c->Value(c->NumElements(indices_shape)) > 0 ||
c->Value(c->NumElements(updates_shape)) > 0)) {
return errors::InvalidArgument(
"Indices and updates specified for empty input");
}
if (c->RankKnown(indices_shape) && c->RankKnown(updates_shape) &&
c->Rank(updates_shape) != 0) {
const int64_t outer_dims = c->Rank(indices_shape) - 1;
const DimensionHandle ixdim = c->Dim(indices_shape, -1);
if (c->ValueKnown(ixdim)) {
int64_t ix = c->Value(ixdim);
ShapeHandle unused;
ShapeHandle prefix_indices;
TF_RETURN_IF_ERROR(
c->Subshape(indices_shape, 0, outer_dims, &prefix_indices));
ShapeHandle prefix_updates;
TF_RETURN_IF_ERROR(
c->Subshape(updates_shape, 0, outer_dims, &prefix_updates));
Status s = c->Merge(prefix_indices, prefix_updates, &unused);
if (!s.ok()) {
return errors::InvalidArgument(
"Dimensions [0,", outer_dims,
") of indices[shape=", c->DebugString(indices_shape),
"] = ", c->DebugString(prefix_indices),
" must match dimensions [0,", outer_dims,
") of updates[shape=", c->DebugString(updates_shape),
"] = ", c->DebugString(prefix_updates), ": ", s.message());
}
ShapeHandle suffix_output;
TF_RETURN_IF_ERROR(c->Subshape(input_shape, ix, &suffix_output));
ShapeHandle suffix_updates;
TF_RETURN_IF_ERROR(
c->Subshape(updates_shape, outer_dims, &suffix_updates));
s = c->Merge(suffix_output, suffix_updates, &unused);
if (!s.ok()) {
return errors::InvalidArgument(
"Dimensions [", ix, ",", c->Rank(input_shape),
") of input[shape=", c->DebugString(input_shape),
"] = ", c->DebugString(suffix_output), " must match dimensions [",
outer_dims, ",", c->Rank(updates_shape),
") of updates[shape=", c->DebugString(updates_shape),
"] = ", c->DebugString(suffix_updates), ": ", s.message());
}
}
}
if (c->input_handle_shapes_and_types(0) == nullptr && c->num_outputs() > 0) {
c->set_output(0, input_shape);
}
return absl::OkStatus();
}
Status ExplicitShape(InferenceContext* c) {
PartialTensorShape shape;
TF_RETURN_IF_ERROR(c->GetAttr("shape", &shape));
ShapeHandle output_shape;
TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(shape, &output_shape));
c->set_output(0, output_shape);
return absl::OkStatus();
}
Status ExplicitShapes(InferenceContext* c) {
std::vector<PartialTensorShape> shapes;
TF_RETURN_IF_ERROR(c->GetAttr("shapes", &shapes));
if (shapes.empty()) {
return errors::Internal("shapes attribute is empty");
}
for (int i = 0, end = shapes.size(); i < end; ++i) {
ShapeHandle output_shape;
TF_RETURN_IF_ERROR(
c->MakeShapeFromPartialTensorShape(shapes[i], &output_shape));
c->set_output(i, output_shape);
}
return absl::OkStatus();
}
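// Shape function for sparse reductions: when the dense shape and axes are
// statically known, the reduced dense shape is computed directly; otherwise
// the output is unknown.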
Status SparseReduceShapeFn(InferenceContext* c) {
bool keep_dims = false;
TF_RETURN_IF_ERROR(c->GetAttr("keep_dims", &keep_dims));
const Tensor* shape_tensor = c->input_tensor(2);
const Tensor* axes_tensor = c->input_tensor(3);
if (shape_tensor != nullptr && axes_tensor != nullptr) {
auto shape_vec = shape_tensor->flat<int64_t>();
auto axes_vec = axes_tensor->flat<int32>();
int64_t ndims = shape_vec.size();
absl::flat_hash_set<int64_t> axes;
if (ndims == 0)
return errors::InvalidArgument(
"Number of dims in shape tensor must not be 0");
for (int i = 0; i < axes_vec.size(); i++) {
axes.insert((axes_vec(i) + ndims) % ndims);
}
std::vector<DimensionHandle> dims;
if (keep_dims) {
dims.reserve(ndims);
for (int d = 0; d < ndims; ++d) {
if (axes.find(d) == axes.end()) {
dims.push_back(c->MakeDim(shape_vec(d)));
} else {
dims.push_back(c->MakeDim(1));
}
}
} else {
for (int d = 0; d < ndims; ++d) {
if (axes.find(d) == axes.end()) {
dims.push_back(c->MakeDim(shape_vec(d)));
}
}
}
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
}
return UnknownShape(c);
}
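// Shape function for QuantizedConv2D: the usual convolution output plus
// scalar min/max outputs; inputs 2-5 carry the scalar quantization ranges.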
Status QuantizedConv2DShape(InferenceContext* c) {
TF_RETURN_IF_ERROR(shape_inference::Conv2DShape(c));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(5), 0, &unused));
c->set_output(1, c->Scalar());
c->set_output(2, c->Scalar());
return absl::OkStatus();
}
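// Shared shape function for fused quantized convolutions. Extra bias and/or
// summand inputs (when "BiasAdd"/"Sum" appear in fused_ops) shift the
// positions of the subsequent min/max inputs: the input min/max must be
// scalars and the filter min/max at most rank 1. With "Requantize" the
// min/max outputs are scalars; otherwise they take the filter min/max
// (per-channel) shape.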
Status FusedQuantizedConvShape(InferenceContext* c, int num_dims) {
std::vector<string> fused_ops;
TF_RETURN_IF_ERROR(c->GetAttr("fused_ops", &fused_ops));
  ShapeHandle unused, channel;
  const bool fused_sum =
      std::find(fused_ops.begin(), fused_ops.end(), "Sum") != fused_ops.end();
  const bool fused_bias = std::find(fused_ops.begin(), fused_ops.end(),
                                    "BiasAdd") != fused_ops.end();
  const bool fused_requantize = std::find(fused_ops.begin(), fused_ops.end(),
                                          "Requantize") != fused_ops.end();
const int kMinInputBaseIdx = 2;
const int kMinFilterBaseIdx = 4;
int min_input_filter_offset = 0;
if (fused_bias && !fused_sum) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &unused));
min_input_filter_offset = 1;
} else if (fused_sum && !fused_bias) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), num_dims, &unused));
min_input_filter_offset = 1;
} else if (fused_bias && fused_sum) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), num_dims, &unused));
min_input_filter_offset = 2;
}
TF_RETURN_IF_ERROR(
c->WithRank(c->input(kMinInputBaseIdx + min_input_filter_offset), 0,
&unused));
TF_RETURN_IF_ERROR(
c->WithRank(c->input(kMinInputBaseIdx + min_input_filter_offset + 1), 0,
&unused));
TF_RETURN_IF_ERROR(
c->WithRankAtMost(c->input(kMinFilterBaseIdx + min_input_filter_offset),
1, &channel));
TF_RETURN_IF_ERROR(c->WithRankAtMost(
c->input(kMinFilterBaseIdx + min_input_filter_offset + 1), 1,
&channel));
if (fused_requantize) {
c->set_output(1, c->Scalar());
c->set_output(2, c->Scalar());
} else {
c->set_output(1, channel);
c->set_output(2, channel);
}
return absl::OkStatus();
}
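// 2D wrappers: run the regular (or depthwise) convolution shape inference
// first, then apply the fused quantized input/output checks above.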
Status FusedQuantizedConv2DShape(InferenceContext* c) {
TF_RETURN_IF_ERROR(shape_inference::Conv2DShapeImpl(c, true));
TF_RETURN_IF_ERROR(FusedQuantizedConvShape(c, 4));
return absl::OkStatus();
}
Status FusedQuantizedDepthwiseConv2D(InferenceContext* c) {
TF_RETURN_IF_ERROR(DepthwiseConv2DNativeShapeImpl(c, true));
TF_RETURN_IF_ERROR(FusedQuantizedConvShape(c, 4));
return absl::OkStatus();
}
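// Shape function for QuantizedAvgPool: regular AvgPool shape plus scalar
// min/max input checks and scalar min/max outputs.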
Status QuantizedAvgPoolShape(InferenceContext* c) {
TF_RETURN_IF_ERROR(shape_inference::AvgPoolShape(c));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
c->set_output(1, c->Scalar());
c->set_output(2, c->Scalar());
return absl::OkStatus();
}
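// Shape function for QuantizeV2. The "axis" attribute is optional; with
// axis == -1 (per-tensor quantization) the min/max inputs must be scalars,
// and with a non-negative axis they must be vectors whose length matches
// that axis of the input.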
Status QuantizeV2Shape(InferenceContext* c) {
int axis = -1;
Status s = c->GetAttr("axis", &axis);
if (!s.ok() && s.code() != error::NOT_FOUND) {
return s;
}
if (axis < -1) {
return errors::InvalidArgument("axis should be at least -1, got ", axis);
}
const int minmax_rank = (axis == -1) ? 0 : 1;
TF_RETURN_IF_ERROR(shape_inference::UnchangedShape(c));
ShapeHandle minmax;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), minmax_rank, &minmax));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), minmax_rank, &minmax));
if (axis != -1) {
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), axis + 1, &input));
DimensionHandle depth;
TF_RETURN_IF_ERROR(
c->Merge(c->Dim(minmax, 0), c->Dim(input, axis), &depth));
}
c->set_output(1, minmax);
c->set_output(2, minmax);
return absl::OkStatus();
}
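// Shape function for ReduceScatter: the output matches the input except that
// the scatter dimension is divided evenly by the group size (dimension 1 of
// group_assignment). If the scatter-dimension tensor is not a compile-time
// constant, the output shape is unknown.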
Status ReduceScatterShape(shape_inference::InferenceContext* c) {
shape_inference::ShapeHandle in = c->input(0);
if (!c->RankKnown(in)) {
c->set_output(0, in);
return absl::OkStatus();
}
shape_inference::ShapeHandle group_assignment_shape = c->input(1);
if (c->Rank(group_assignment_shape) != 2)
return errors::InvalidArgument(
"ReduceScatter group_assignment should be rank 2");
const Tensor* scatter_dimension = c->input_tensor(2);
if (!scatter_dimension) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
int64_t scatter_dim;
TF_RETURN_IF_ERROR(c->GetScalarFromTensor(scatter_dimension, &scatter_dim));
std::vector<shape_inference::DimensionHandle> out_dims;
out_dims.reserve(c->Rank(in));
for (int i = 0; i < c->Rank(in); ++i) {
if (i == scatter_dim) {
shape_inference::DimensionHandle dim = c->Dim(in, i);
shape_inference::DimensionHandle out_dim;
TF_RETURN_IF_ERROR(c->Divide(dim, c->Dim(group_assignment_shape, 1),
true, &out_dim));
out_dims.push_back(out_dim);
} else {
out_dims.emplace_back(c->Dim(in, i));
}
}
c->set_output(0, c->MakeShape(out_dims));
return absl::OkStatus();
}
}
} | #include "tensorflow/core/framework/common_shape_fns.h"
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace shape_inference {
namespace {
PartialTensorShape S(std::initializer_list<int64_t> dims) {
return PartialTensorShape(dims);
}
PartialTensorShape Unknown() { return PartialTensorShape(); }
OpDef MakeOpDef(int num_inputs, int num_outputs) {
OpRegistrationData op_reg_data;
OpDefBuilder b("dummy");
for (int i = 0; i < num_inputs; ++i) {
b.Input(strings::StrCat("i", i, ": float"));
}
for (int i = 0; i < num_outputs; ++i) {
b.Output(strings::StrCat("o", i, ": float"));
}
CHECK(b.Attr("foo:string").Finalize(&op_reg_data).ok());
return op_reg_data.op_def;
}
}
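// The helpers above (S, Unknown, MakeOpDef) build partial shapes and a dummy
// OpDef so each test can construct an InferenceContext directly.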
TEST(CommonShapeFnsTest, NoOutputShapeTest) {
OpRegistrationData op_reg_data;
TF_CHECK_OK(OpDefBuilder("Assert")
.Input("condition: bool")
.Input("data: float")
.Finalize(&op_reg_data));
OpDef op_def = op_reg_data.op_def;
NodeDef def;
TF_CHECK_OK(NodeDefBuilder("test", "Assert")
.Input("condition", 0, DT_BOOL)
.Input({{"data", 0, DT_FLOAT}})
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({}), S({10})}, {},
{}, {});
TF_EXPECT_OK(NoOutputs(&c));
EXPECT_EQ(0, c.num_outputs());
}
TEST(CommonShapeFnsTest, ScalarShapeTest) {
OpRegistrationData op_reg_data;
TF_CHECK_OK(OpDefBuilder("L2Loss")
.Input("t: float")
.Output("t: float")
.Finalize(&op_reg_data));
OpDef op_def = op_reg_data.op_def;
NodeDef def;
TF_CHECK_OK(
NodeDefBuilder("test", "L2Loss").Input("t", 0, DT_FLOAT).Finalize(&def));
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({})}, {}, {}, {});
TF_EXPECT_OK(ScalarShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(0, c.Rank(output));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({1, 23, 4, 4, 2})},
{}, {}, {});
TF_EXPECT_OK(ScalarShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(0, c.Rank(output));
}
}
TEST(CommonShapeFnsTest, MatMulShapeTest) {
OpRegistrationData op_reg_data;
TF_CHECK_OK(OpDefBuilder("MatMul")
.Input("a: float")
.Input("b: float")
.Output("c: float")
.Attr("transpose_a:bool=false")
.Attr("transpose_b:bool=false")
.Finalize(&op_reg_data));
OpDef op_def = op_reg_data.op_def;
NodeDef def;
TF_CHECK_OK(NodeDefBuilder("test", "MatMul")
.Input("a", 0, DT_FLOAT)
.Input("b", 0, DT_FLOAT)
.Attr("transpose_a", false)
.Attr("transpose_b", false)
.Finalize(&def));
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({2, 3}), S({3, 4})}, {}, {}, {});
TF_EXPECT_OK(MatMulShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(2, c.Value(c.Dim(output, 0)));
EXPECT_EQ(4, c.Value(c.Dim(output, 1)));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({2, -1}), S({3, 4})}, {}, {}, {});
TF_EXPECT_OK(MatMulShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(2, c.Value(c.Dim(output, 0)));
EXPECT_EQ(4, c.Value(c.Dim(output, 1)));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({2}), S({3, 4})},
{}, {}, {});
auto s = MatMulShape(&c);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(
absl::StrContains(s.message(), "Shape must be rank 2 but is rank 1"));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({2, 3}), S({3, -1})}, {}, {}, {});
TF_EXPECT_OK(MatMulShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(2, c.Value(c.Dim(output, 0)));
EXPECT_FALSE(c.ValueKnown(c.Dim(output, 1)));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({2, 5}), S({3, 4})}, {}, {}, {});
auto s = MatMulShape(&c);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(s.message(),
"Dimensions must be equal, but are 5 and 3"));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({2, 5, 3}), S({3, 5, 4})}, {}, {}, {});
auto s = MatMulShape(&c);
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(
absl::StrContains(s.message(), "Shape must be rank 2 but is rank 3"));
}
{
TF_CHECK_OK(NodeDefBuilder("test", "MatMul")
.Input("a", 0, DT_FLOAT)
.Input("b", 0, DT_FLOAT)
.Attr("transpose_a", true)
.Attr("transpose_b", false)
.Attr("type", DT_FLOAT)
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({3, 2}), S({3, 4})}, {}, {}, {});
    TF_EXPECT_OK(MatMulShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(2, c.Value(c.Dim(output, 0)));
EXPECT_EQ(4, c.Value(c.Dim(output, 1)));
}
{
TF_CHECK_OK(NodeDefBuilder("test", "MatMul")
.Input("a", 0, DT_FLOAT)
.Input("b", 0, DT_FLOAT)
.Attr("transpose_a", false)
.Attr("transpose_b", true)
.Attr("type", DT_FLOAT)
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({2, 3}), S({4, 3})}, {}, {}, {});
    TF_EXPECT_OK(MatMulShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(2, c.Value(c.Dim(output, 0)));
EXPECT_EQ(4, c.Value(c.Dim(output, 1)));
}
}
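// Einsum shape inference: covers explicit equations, ellipsis broadcasting,
// and malformed-equation errors.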
TEST(CommonShapeFnsTest, Einsum_ShapeFn) {
ShapeInferenceTestOp op("Einsum");
auto set_equation = [&op](int n, string equation) {
std::vector<NodeDefBuilder::NodeOut> input_list;
input_list.reserve(n);
for (int i = 0; i < n; ++i) {
input_list.emplace_back("a", 0, DT_FLOAT);
}
TF_ASSERT_OK(NodeDefBuilder("test", "Einsum")
.Input(input_list)
.Attr("equation", equation)
.Finalize(&op.node_def));
};
set_equation(1, "abc->c");
INFER_OK(op, "[?,?,?]", "[d0_2]");
set_equation(1, "abc->aabbcc");
INFER_OK(op, "[?,?,?]", "[d0_0,d0_0,d0_1,d0_1,d0_2,d0_2]");
set_equation(1, "abc->");
INFER_OK(op, "[?,?,?]", "[]");
set_equation(1, "->");
INFER_OK(op, "[]", "[]");
set_equation(2, "ij,jk->ik");
INFER_OK(op, "[?,?];[?,?]", "[d0_0,d1_1]");
set_equation(2, "ij,jk->ik");
INFER_OK(op, "[?,?];[?,?]", "[d0_0,d1_1]");
set_equation(2, "ab,ab->");
INFER_OK(op, "[?,?];[?,?]", "[]");
set_equation(2, "ab,->b");
INFER_OK(op, "[?,?];[]", "[d0_1]");
set_equation(2, ",->");
INFER_OK(op, "[];[]", "[]");
set_equation(2, "aaa,b->abbb");
INFER_OK(op, "[?,?,?];[?]", "[d0_0,d1_0,d1_0,d1_0]");
set_equation(2, ",abcd->badc");
INFER_OK(op, "[];[?,?,?,?]", "[d1_1,d1_0,d1_3,d1_2]");
set_equation(1, "a...bc->c...");
INFER_OK(op, "[?,?,?,?,?]", "[d0_4,d0_1,d0_2]");
set_equation(2, "...ij,...jk->...ik");
INFER_OK(op, "[?,?,?,?,?];[1,?,?]", "[d0_0,d0_1,d0_2,d0_3,d1_2]");
INFER_OK(op, "[1,?,?];[?,?,?,?,?]", "[d1_0,d1_1,d1_2,d0_1,d1_4]");
set_equation(1, "abc->c");
INFER_OK(op, "?", "[?]");
set_equation(1, "a...bc->c");
INFER_OK(op, "?", "[?]");
set_equation(1, "a...bc->c...");
INFER_OK(op, "?", "?");
set_equation(2, "...ij,...jk->...ik");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[?,?,?];?", "?");
INFER_OK(op, "?;[?,?,?]", "?");
set_equation(2, "...ij,...jk->ik");
INFER_OK(op, "?;?", "[?,?]");
set_equation(2, "abd,b...c->...cad");
INFER_OK(op, "[?,?,?];[?,?,?,?]", "[d1_1,d1_2,d1_3,d0_0,d0_2]");
set_equation(2, "...ab,b...c->ac...");
INFER_OK(op, "[?,1,?,?];[?,?,?]", "[d0_2,d1_2,d0_0,d1_1]");
set_equation(2, "ab->b");
INFER_ERROR("got: 2", op, "[?,?];[?,?]");
set_equation(1, "ab,a->b");
INFER_ERROR("got: 1", op, "[?,?]");
set_equation(1, "a");
INFER_ERROR("equation", op, "[2]");
set_equation(2, "ab,bc");
INFER_ERROR("equation", op, "[2,2];[2,2]");
set_equation(1, "..a.->a...");
INFER_ERROR("ellipsis", op, "[1,1,2,1]");
set_equation(1, "...a->.a..");
INFER_ERROR("ellipsis", op, "[1,1,1,2]");
set_equation(1, "...a...->...a");
INFER_ERROR("ellipsis", op, "[1,1,1,2]");
set_equation(1, "..a..b..->...ab");
INFER_ERROR("ellipsis", op, "[1,1,2,1]");
set_equation(2, "...a...,ab->a");
INFER_ERROR("ellipsis", op, "[1,2,1];[2,1]");
set_equation(2, "a,...ab...->a");
INFER_ERROR("ellipsis", op, "[2];[1,2,1,1]");
set_equation(2, "a,ab->a......");
INFER_ERROR("ellipsis", op, "[2];[2,1]");
set_equation(1, "abc->d");
INFER_ERROR("'d'", op, "[?,?,?]");
set_equation(1, "abc->c");
INFER_ERROR("4", op, "[?,?,?,?]");
INFER_ERROR("2", op, "[?,?]");
set_equation(1, "...abc->...c");
INFER_ERROR("2", op, "[?,?]");
set_equation(2, "ab,ab->a");
INFER_ERROR("are 1 and 2", op, "[1,2];[2,1]");
set_equation(2, "aa,bb->a");
INFER_ERROR("are 1 and 2", op, "[1,2];[2,2]");
set_equation(2, "...ij,...jk->...ik");
INFER_ERROR("are 2 and 3", op, "[2,?,?];[3,?,?]");
set_equation(2, "i...j,jk...->...ik");
INFER_ERROR("are 2 and 3", op, "[?,2,?];[?,?,3]");
set_equation(2, "...ij,...jk->ik");
set_equation(2, "i...j,jk...->ik");
INFER_ERROR("non-empty broadcasting", op, "[?,2,?];[?,?]");
set_equation(2, "...ab,b...c->ac...");
INFER_OK(op, "?;[4,5,3]", "?");
}
TEST(CommonShapeFnsTest, BatchMatMulV2_ShapeFn) {
ShapeInferenceTestOp op("BatchMatMulV2");
auto set_adj = [&op](bool adj_x, bool adj_y) {
TF_ASSERT_OK(NodeDefBuilder("test", "BatchMatMulV2")
.Input({"a", 0, DT_FLOAT})
.Input({"b", 0, DT_FLOAT})
.Attr("adj_x", adj_x)
.Attr("adj_y", adj_y)
.Finalize(&op.node_def));
};
set_adj(false, false);
INFER_ERROR("at least rank 2", op, "[];?");
INFER_ERROR("at least rank 2", op, "[1];?");
INFER_ERROR("at least rank 2", op, "?;[]");
INFER_ERROR("at least rank 2", op, "?;[2]");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[?,?];[?,?]", "[d0_0,d1_1]");
INFER_OK(op, "[3,?,?];[3,?,?]", "[d0_0,d0_1,d1_2]");
INFER_OK(op, "[?,?,?];[1,?,?]", "[d0_0,d0_1,d1_2]");
INFER_OK(op, "[?,?,?];[2,?,?]", "[d1_0,d0_1,d1_2]");
INFER_OK(op, "[1,?,?];[?,?,?]", "[d1_0,d0_1,d1_2]");
INFER_OK(op, "[2,?,?];[?,?,?]", "[d0_0,d0_1,d1_2]");
INFER_OK(op, "[?,?,?];[?,?,?]", "[?,d0_1,d1_2]");
INFER_OK(op, "[?,?];[?,?,?]", "[d1_0,d0_0,d1_2]");
INFER_OK(op, "[?,?,?];[?,?]", "[d0_0,d0_1,d1_1]");
INFER_OK(op, "[?,?];[?,?,?,?]", "[d1_0,d1_1,d0_0,d1_3]");
INFER_OK(op, "[?,?,?,?];[?,?]", "[d0_0,d0_1,d0_2,d1_1]");
INFER_OK(op, "[?,?];?", "?");
INFER_OK(op, "?;[?,?]", "?");
INFER_OK(op, "[?,?,?,?];?", "?");
INFER_OK(op, "[?,?,?,?,?];[1,?,?]", "[d0_0,d0_1,d0_2,d0_3,d1_2]");
INFER_OK(op, "[1,?,?];[?,?,?,?,?]", "[d1_0,d1_1,d1_2,d0_1,d1_4]");
INFER_ERROR("are 2 and 3", op, "[?,?,2,?,?];[3,?,?]");
INFER_ERROR("are 2 and 3", op, "[2,?,?];[?,?,3,?,?]");
set_adj(false, false);
INFER_OK(op, "[2,2,3,4];[2,2,?,?]", "[d0_0,d0_1,d0_2,d1_3]");
INFER_ERROR("are 2 and 3", op, "[?,1,2];[?,3,1]");
set_adj(true, false);
INFER_OK(op, "[2,2,3,4];[2,2,?,?]", "[d0_0,d0_1,d0_3,d1_3]");
INFER_ERROR("are 2 and 3", op, "[?,2,1];[?,3,1]");
set_adj(false, true);
INFER_OK(op, "[2,2,?,?];[2,2,3,4]", "[d0_0,d0_1,d0_2,d1_2]");
INFER_ERROR("are 2 and 3", op, "[?,1,2];[?,1,3]");
set_adj(true, true);
INFER_OK(op, "[2,2,?,?];[2,2,3,4]", "[d0_0,d0_1,d0_3,d1_2]");
INFER_ERROR("are 2 and 3", op, "[?,2,1];[?,1,3]");
}
TEST(CommonShapeFnsTest, BiasAddShapeTest) {
OpRegistrationData op_reg_data;
TF_CHECK_OK(OpDefBuilder("BiasAdd")
.Input("a: float")
.Input("b: float")
.Output("c: float")
.Finalize(&op_reg_data));
OpDef op_def = op_reg_data.op_def;
NodeDef def;
TF_CHECK_OK(NodeDefBuilder("test", "BiasAdd")
.Input("a", 0, DT_FLOAT)
.Input("b", 0, DT_FLOAT)
.Finalize(&def));
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({2, 10}), S({10})},
{}, {}, {});
TF_EXPECT_OK(BiasAddShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(2, c.Value(c.Dim(output, 0)));
EXPECT_EQ(10, c.Value(c.Dim(output, 1)));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{Unknown(), Unknown()}, {}, {}, {});
TF_EXPECT_OK(BiasAddShape(&c));
ShapeHandle output = c.output(0);
EXPECT_FALSE(c.RankKnown(output));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({4, 3, 4, 2, 15}), S({15})}, {}, {}, {});
TF_EXPECT_OK(BiasAddShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ("[4,3,4,2,15]", c.DebugString(output));
}
{
TF_CHECK_OK(NodeDefBuilder("test", "BiasAdd")
.Input("a", 0, DT_FLOAT)
.Input("b", 0, DT_FLOAT)
.Attr("data_format", "NCHW")
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({2, 3, 4, 5}), S({3})}, {}, {}, {});
TF_EXPECT_OK(BiasAddShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ("[2,3,4,5]", c.DebugString(output));
}
{
TF_CHECK_OK(NodeDefBuilder("test", "BiasAdd")
.Input("a", 0, DT_FLOAT)
.Input("b", 0, DT_FLOAT)
.Attr("data_format", "NCHW")
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({8, 6, 4, 2, 3, 4, 5}), S({3})}, {}, {}, {});
EXPECT_FALSE(BiasAddShape(&c).ok());
}
{
TF_CHECK_OK(NodeDefBuilder("test", "BiasAdd")
.Input("a", 0, DT_FLOAT)
.Input("b", 0, DT_FLOAT)
.Attr("data_format", "NCHW")
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({10, 11, 12}), S({11})}, {}, {}, {});
TF_EXPECT_OK(BiasAddShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ("[10,11,12]", c.DebugString(output));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({3}), S({3})}, {},
{}, {});
EXPECT_FALSE(BiasAddShape(&c).ok());
}
{
TF_CHECK_OK(NodeDefBuilder("test", "BiasAdd")
.Input("a", 0, DT_FLOAT)
.Input("b", 0, DT_FLOAT)
.Attr("data_format", "NCHW")
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({2, 3}), S({3})},
{}, {}, {});
EXPECT_FALSE(BiasAddShape(&c).ok());
}
}
TEST(CommonShapeFnsTest, FusedBatchNormExTest) {
ShapeInferenceTestOp op("_FusedBatchNormEx");
std::vector<NodeDefBuilder::NodeOut> no_side_inputs;
TF_CHECK_OK(NodeDefBuilder("test", "_FusedBatchNormEx")
.Input("x", 0, DT_HALF)
.Input("scale", 0, DT_FLOAT)
.Input("offset", 0, DT_FLOAT)
.Input("mean", 0, DT_FLOAT)
.Input("variance", 0, DT_FLOAT)
.Input(no_side_inputs)
.Attr("T", DT_HALF)
.Attr("U", DT_FLOAT)
.Attr("epsilon", 0.001)
.Attr("data_format", "NHWC")
.Attr("activation_mode", "Relu")
.Attr("num_side_inputs", 0)
.Attr("is_training", true)
.Finalize(&op.node_def));
INFER_ERROR("must be divisible by 4", op, "[2,2,2,2];[2];[2];[2];[2]");
INFER_OK(op, "[2,2,2,4];[4];[4];[4];[4]",
"[d0_0,d0_1,d0_2,d0_3];[d0_3];[d0_3];[d0_3];[d0_3];?");
}
TEST(CommonShapeFnsTest, BiasAddGradShapeTest) {
OpRegistrationData op_reg_data;
TF_CHECK_OK(OpDefBuilder("BiasAddGrad")
.Input("a: float")
.Output("b: float")
.Finalize(&op_reg_data));
OpDef op_def = op_reg_data.op_def;
NodeDef def;
TF_CHECK_OK(NodeDefBuilder("test", "BiasAddGrad")
.Input("a", 0, DT_FLOAT)
.Finalize(&def));
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({2, 10})}, {}, {},
{});
TF_EXPECT_OK(BiasAddGradShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(10, c.Value(c.Dim(output, 0)));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({5, 7, 2, 10})},
{}, {}, {});
TF_EXPECT_OK(BiasAddGradShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(10, c.Value(c.Dim(output, 0)));
}
{
TF_CHECK_OK(NodeDefBuilder("test", "BiasAddGrad")
.Input("a", 0, DT_FLOAT)
.Attr("data_format", "NCHW")
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({2, 3, 4, 5})}, {},
{}, {});
TF_EXPECT_OK(BiasAddGradShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(3, c.Value(c.Dim(output, 0)));
}
{
TF_CHECK_OK(NodeDefBuilder("test", "BiasAddGrad")
.Input("a", 0, DT_FLOAT)
.Attr("data_format", "NCHW")
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({8, 6, 4, 2, 3, 4, 5})}, {}, {}, {});
TF_EXPECT_OK(BiasAddGradShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(6, c.Value(c.Dim(output, 0)));
}
{
TF_CHECK_OK(NodeDefBuilder("test", "BiasAddGrad")
.Input("a", 0, DT_FLOAT)
.Attr("data_format", "NCHW")
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({10, 11, 12})}, {},
{}, {});
TF_EXPECT_OK(BiasAddGradShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(11, c.Value(c.Dim(output, 0)));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({3})}, {}, {}, {});
EXPECT_FALSE(BiasAddGradShape(&c).ok());
}
{
TF_CHECK_OK(NodeDefBuilder("test", "BiasAddGrad")
.Input("a", 0, DT_FLOAT)
.Attr("data_format", "NCHW")
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({2, 3})}, {}, {},
{});
EXPECT_FALSE(BiasAddGradShape(&c).ok());
}
}
TEST(CommonShapeFnsTest, ConvTest) {
ShapeInferenceTestOp op("Conv");
  auto set_op = [&op](const std::vector<int32>& strides, const string& padding,
                      const string& data_format, int batch_dims, int groups) {
TF_CHECK_OK(NodeDefBuilder("test", op.name)
.Input("input", 0, DT_FLOAT)
.Input("filter", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("padding", padding)
.Attr("data_format", data_format)
.Attr("batch_dims", batch_dims)
.Attr("groups", groups)
.Finalize(&op.node_def));
};
set_op({{1, 1, 1, 1}}, "VALID",
"CHANNELS_LAST", 1, 1);
INFER_ERROR("Input tensor rank must be the same as filter rank.", op,
"[2,2,1,1,1];[2,1,1,1]");
set_op({{1, 1, 1, 1}}, "VALID",
"CHANNELS_LAST", -1, 1);
INFER_ERROR("must be non-negative", op, "[1,2,2,1];[1,1,1,1]");
set_op({{1, 1, 1, 1}}, "VALID",
"CHANNELS_LAST", 5, 1);
INFER_ERROR(
"Input tensor must be rank 4 or 5, excluding extra "
"batch dimensions, but got: 0",
op, "[1,2,2,1];[1,1,1,1]");
set_op({{1, 1, 1, 1}}, "VALID",
"CHANNELS_LAST", 1, 1);
INFER_ERROR("extra batch dimensions", op, "[1,2,3];[1,1,1,1]");
set_op({{1, 1, 1, 1, 1}}, "VALID",
"CHANNELS_LAST", 1, 1);
INFER_ERROR("extra batch dimensions", op, "[1,2,3,4,5,6];[1,1,1,1,1]");
set_op({{1, 1, 1, 1, 1}}, "VALID",
"CHANNELS_LAST", 1, 0);
INFER_ERROR("should be a positive integer", op, "[1,2,3,4,5];[1,1,1,1,1]");
set_op({{1, 1, 1, 1, 1}}, "VALID",
"CHANNELS_LAST", 1, -1);
INFER_ERROR("should be a positive integer", op, "[1,2,3,4,5];[1,1,1,1,1]");
set_op({{1, 1, 1, 1, 1}}, "VALID",
"CHANNELS_LAST", 1, 3);
INFER_ERROR("should divide input depth", op, "[1,1,1,1,13];[3,3,3,13,3]");
set_op({{1, 1, 1, 1, 1}}, "VALID",
"CHANNELS_LAST", 1, 3);
INFER_ERROR("should divide output depth", op, "[3,3,3,3,3];[1,1,1,3,13]");
set_op({{1, 1, 1, 1, 1}}, "SAME",
"CHANNELS_LAST", 1, 2);
INFER_OK(op, "[1,4,4,4,10];[2,2,2,5,2]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,4,4,10];[2,2,2,5,2]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
set_op({{1, 1, 1, 1, 1}}, "SAME",
"CHANNELS_LAST", 1, 1);
INFER_ERROR(
"Input depth divided by filter input depth does not match with groups "
"parameter (1)",
op, "[1,4,4,4,10];[2,2,2,5,2]");
set_op({{1, 1, 1, 1, 1}}, "SAME",
"CHANNELS_LAST", 1, 10);
INFER_OK(op, "[1,4,4,4,10];[2,2,2,1,10]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
}
TEST(CommonShapeFnsTest, Conv2DFormatsTest) {
ShapeInferenceTestOp op("Conv2D");
auto set_op = [&op](const std::vector<int32>& strides, const string& padding,
const string& data_format, const string& filter_format,
const std::vector<int32>& explicit_paddings = {}) {
TF_CHECK_OK(NodeDefBuilder("test", op.name)
.Input("input", 0, DT_FLOAT)
.Input("filter", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("padding", padding)
.Attr("explicit_paddings", explicit_paddings)
.Attr("data_format", data_format)
.Attr("filter_format", filter_format)
.Finalize(&op.node_def));
};
set_op({{1, 1, 1, 1}}, "VALID",
"NCHW_VECT_C", "OIHW_VECT_I");
INFER_OK(op, "[1,1,2,2,4];[4,1,1,1,4]", "[d0_0,1,2,2,d0_4]");
set_op({{1, 1, 1, 1}}, "VALID",
"NCHW_VECT_C", "OIHW_VECT_I");
INFER_OK(op, "[1,1,2,2,4];[4,1,2,2,4]", "[d0_0,1,1,1,d0_4]");
set_op({{1, 1, 2, 2}}, "VALID",
"NCHW_VECT_C", "OIHW_VECT_I");
INFER_OK(op, "[1,1,3,3,4];[8,1,1,1,4]", "[d0_0,2,2,2,d0_4]");
set_op({{1, 1, 2, 1}}, "VALID",
"NCHW_VECT_C", "OIHW_VECT_I");
INFER_OK(op, "[1,1,3,3,4];[4,1,1,1,4]", "[d0_0,1,2,3,d0_4]");
set_op({{1, 1, 1, 2}}, "VALID",
"NCHW_VECT_C", "OIHW_VECT_I");
INFER_OK(op, "[1,1,4,4,4];[4,1,2,1,4]", "[d0_0,1,3,2,d0_4]");
set_op({{1, 1, 1, 2}}, "VALID",
"NCHW_VECT_C", "OIHW_VECT_I");
INFER_OK(op, "[1,1,4,4,32];[32,1,2,1,32]", "[d0_0,1,3,2,d0_4]");
}
class Conv2DShapeTest : public ::testing::TestWithParam<string> {};
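// Parameterized over the op name so both "Conv2D" and the rank-generic
// "Conv" op get the same shape-inference coverage (see
// INSTANTIATE_TEST_SUITE_P below).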
TEST_P(Conv2DShapeTest, Conv2DShapeTest) {
const string op_name = GetParam();
ShapeInferenceTestOp op(op_name);
auto set_op = [&op](const std::vector<int32>& strides, const string& padding,
const string& data_format, const string& filter_format,
const std::vector<int32>& explicit_paddings = {}) {
string format;
if (op.name == "Conv")
format = (data_format == "NHWC") ? "CHANNELS_LAST" : "CHANNELS_FIRST";
else
format = data_format;
TF_CHECK_OK(NodeDefBuilder("test", op.name)
.Input("input", 0, DT_FLOAT)
.Input("filter", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("padding", padding)
.Attr("explicit_paddings", explicit_paddings)
.Attr("data_format", format)
.Attr("filter_format", filter_format)
.Finalize(&op.node_def));
};
set_op({{1, 1, 0, 1}}, "VALID",
"NHWC", "HWIO");
INFER_ERROR("must be rank 4", op, "[4,4];[2,1,1,1]");
INFER_ERROR("must be rank 4", op, "[1,4,4,1];[2,1,1]");
set_op({{1, 1, 0, 1}}, "VALID",
"NHWC", "HWIO");
INFER_ERROR("must be > 0", op, "[1,2,2,1];[1,1,1,1]");
set_op({{1, 1, 1, 1}}, "VALID",
"NHWC", "HWIO");
INFER_OK(op, "[1,2,2,1];[1,1,1,1]", "[d0_0,2,2,d1_3]");
set_op({{1, 1, 1, 1}}, "VALID",
"NHWC", "HWIO");
INFER_OK(op, "[1,2,2,1];[2,2,1,1]", "[d0_0,1,1,d1_3]");
set_op({{1, 2, 2, 1}}, "VALID",
"NHWC", "HWIO");
INFER_OK(op, "[1,3,3,1];[1,1,1,1]", "[d0_0,2,2,d1_3]");
set_op({{1, 2, 1, 1}}, "VALID",
"NHWC", "HWIO");
INFER_OK(op, "[1,3,3,1];[1,1,1,1]", "[d0_0,2,3,d1_3]");
set_op({{1, 1, 2, 1}}, "VALID",
"NHWC", "HWIO");
INFER_OK(op, "[1,4,4,1];[2,1,1,1]", "[d0_0,3,2,d1_3]");
INFER_OK(op, "[1,4,4,1];[2,1,1,1]", "[d0_0,3,2,d1_3]");
INFER_OK(op, "[1,?,4,1];[2,1,1,1]", "[d0_0,?,2,d1_3]");
INFER_OK(op, "[1,4,?,1];[2,1,1,1]", "[d0_0,3,?,d1_3]");
INFER_OK(op, "[1,4,4,?];[2,1,1,1]", "[d0_0,3,2,d1_3]");
INFER_OK(op, "[1,4,4,1];[?,1,1,1]", "[d0_0,?,2,d1_3]");
INFER_OK(op, "[1,4,4,1];[2,?,1,1]", "[d0_0,3,?,d1_3]");
INFER_ERROR(
"Depth of input (10) is not a multiple of input depth of filter (10000)",
op, "[1,2,2,10];[1,1,10000,20]");
set_op({{1, 1, 1, 1}}, "VALID",
"NCHW", "HWIO");
INFER_OK(op, "[1,1,2,2];[1,1,1,1]", "[d0_0,d1_3,2,2]");
set_op({{1, 1, 1, 1}}, "VALID",
"NCHW", "HWIO");
INFER_OK(op, "[1,1,2,2];[2,2,1,1]", "[d0_0,d1_3,1,1]");
set_op({{1, 1, 2, 2}}, "VALID",
"NCHW", "HWIO");
INFER_OK(op, "[1,1,3,3];[1,1,1,1]", "[d0_0,d1_3,2,2]");
set_op({{1, 1, 2, 1}}, "VALID",
"NCHW", "HWIO");
INFER_OK(op, "[1,1,3,3];[1,1,1,1]", "[d0_0,d1_3,2,3]");
set_op({{1, 1, 1, 2}}, "VALID",
"NCHW", "HWIO");
INFER_OK(op, "[1,1,4,4];[2,1,1,1]", "[d0_0,d1_3,3,2]");
set_op({{1, 1, 1, 1}}, "SAME", "NHWC",
"HWIO");
INFER_OK(op, "[1,4,4,1];[1,1,1,1]", "[d0_0,d0_1,d0_2,d1_3]");
set_op({{1, 1, 1, 1}}, "SAME", "NHWC",
"HWIO");
INFER_OK(op, "[1,3,3,1];[2,2,1,1]", "[d0_0,d0_1,d0_2,d1_3]");
set_op({{1, 2, 2, 1}}, "SAME", "NHWC",
"HWIO");
INFER_OK(op, "[1,4,4,1];[2,2,1,1]", "[d0_0,2,2,d1_3]");
set_op({{1, 1, 1, 1}}, "SAME", "NHWC",
"HWIO");
INFER_OK(op, "[1,4,4,1];[2,2,1,1]", "[d0_0,d0_1,d0_2,d1_3]");
set_op({{1, 1, 1, 1}}, "SAME", "NHWC",
"HWIO");
INFER_OK(op, "[1,4,4,1];[?,?,?,?]", "[d0_0,d0_1,d0_2,d1_3]");
INFER_OK(op, "[1,?,4,1];[?,?,?,?]", "[d0_0,d0_1,d0_2,d1_3]");
INFER_OK(op, "[1,4,?,1];[?,?,?,?]", "[d0_0,d0_1,d0_2,d1_3]");
INFER_OK(op, "[1,4,4,?];[?,?,?,?]", "[d0_0,d0_1,d0_2,d1_3]");
INFER_OK(op, "[?,4,4,1];[?,?,?,?]", "[d0_0,d0_1,d0_2,d1_3]");
set_op({{1, 2, 2, 1}}, "SAME", "NHWC",
"HWIO");
INFER_OK(op, "[?,4,4,1];[?,?,?,?]", "[d0_0,2,2,d1_3]");
INFER_OK(op, "[1,?,4,1];[?,?,?,?]", "[d0_0,?,2,d1_3]");
INFER_OK(op, "[1,4,?,1];[?,?,?,?]", "[d0_0,2,?,d1_3]");
INFER_OK(op, "[1,4,4,?];[?,?,?,?]", "[d0_0,2,2,d1_3]");
set_op({{1, 1, 1, 1}}, "EXPLICIT",
"NHWC", "HWIO",
{0, 0, 0, 2, 1, 4, 0, 0});
INFER_OK(op, "[1,4,4,1];[1,1,1,1]", "[d0_0,6,9,d1_3]");
set_op({{1, 1, 1, 1}}, "EXPLICIT",
"NHWC", "HWIO",
{0, 0, 1, 0, 1, 2, 0, 0});
INFER_OK(op, "[1,3,3,1];[2,2,1,1]", "[d0_0,3,5,d1_3]");
set_op({{1, 2, 2, 1}}, "EXPLICIT",
"NHWC", "HWIO",
{0, 0, 3, 2, 1, 0, 0, 0});
INFER_OK(op, "[1,4,4,2];[2,2,2,3]", "[d0_0,4,2,d1_3]");
set_op({{1, 1, 2, 1}}, "EXPLICIT",
"NHWC", "HWIO",
{0, 0, 1, 1, 2, 2, 0, 0});
INFER_OK(op, "[1,2,2,1];[2,1,1,1]", "[d0_0,3,3,d1_3]");
INFER_OK(op, "[1,4,4,1];[2,1,1,1]", "[d0_0,5,4,d1_3]");
INFER_OK(op, "[1,?,4,1];[2,1,1,1]", "[d0_0,?,4,d1_3]");
INFER_OK(op, "[1,4,?,1];[2,1,1,1]", "[d0_0,5,?,d1_3]");
INFER_OK(op, "[1,4,4,?];[2,1,1,1]", "[d0_0,5,4,d1_3]");
INFER_OK(op, "[1,4,4,1];[?,1,1,1]", "[d0_0,?,4,d1_3]");
INFER_OK(op, "[1,4,4,1];[2,?,1,1]", "[d0_0,5,?,d1_3]");
set_op({{1, 1, 1, 1}}, "EXPLICIT",
"NHWC", "HWIO",
{0, 0, 0, -1, 0, 0, 0, 0});
INFER_ERROR("must be nonnegative", op, "[1,2,2,1];[1,1,1,1]");
set_op({{1, 1, 1, 1}}, "EXPLICIT",
"NHWC", "HWIO",
{0, 0, 0, 0, 0, 0, 0});
INFER_ERROR("must contain 8 values", op, "[1,2,2,1];[1,1,1,1]");
set_op({{1, 1, 1, 1}}, "EXPLICIT", "NHWC", "HWIO",
{0, 0, 0, 0, 0, 0, 0, 0, 0});
INFER_ERROR("must contain 8 values", op, "[1,2,2,1];[1,1,1,1]");
set_op({{1, 1, 1, 1}}, "EXPLICIT",
"NHWC", "HWIO",
{1, 0, 0, 0, 0, 0, 0, 0});
INFER_ERROR("batch or depth dimensions", op, "[1,2,2,1];[1,1,1,1]");
set_op({{1, 1, 1, 1}}, "EXPLICIT",
"NHWC", "HWIO",
{0, 0, 0, 0, 0, 0, 1, 0});
INFER_ERROR("batch or depth dimensions", op, "[1,2,2,1];[1,1,1,1]");
set_op({{1, 1, 1, 1}}, "VALID",
"NHWC", "HWIO",
{0, 0, 0, 0, 0, 0, 0, 0});
INFER_ERROR("must be empty", op, "[1,2,2,1];[1,1,1,1]");
}
TEST_P(Conv2DShapeTest, Conv2DDilatedShapeTest) {
const string op_name = GetParam();
ShapeInferenceTestOp op(op_name);
auto set_op = [&op](const std::vector<int32>& dilations,
const std::vector<int32>& strides, const string& padding,
const string& data_format,
const std::vector<int32>& explicit_paddings = {}) {
string format;
if (op.name == "Conv")
format = (data_format == "NHWC") ? "CHANNELS_LAST" : "CHANNELS_FIRST";
else
format = data_format;
TF_CHECK_OK(NodeDefBuilder("test", "Conv2D")
.Input("input", 0, DT_FLOAT)
.Input("filter", 0, DT_FLOAT)
.Attr("dilations", dilations)
.Attr("strides", strides)
.Attr("padding", padding)
.Attr("explicit_paddings", explicit_paddings)
.Attr("data_format", format)
.Attr("batch_dims", 1)
.Attr("groups", 1)
.Finalize(&op.node_def));
};
set_op({{1, 2, 1}}, {{1, 1, 1, 1}},
"VALID", "NHWC");
INFER_ERROR("contain 4 values", op, "[1,2,2,1];[1,1,1,1]");
set_op({{1, 0, 1, 1}}, {{1, 1, 1, 1}},
"VALID", "NHWC");
INFER_ERROR("must be >= 1", op, "[1,2,2,1];[1,1,1,1]");
set_op({{1, 2, 1, 1}}, {{1, 1, 1, 1}},
"VALID", "NHWC");
INFER_OK(op, "[1,2,2,1];[1,1,1,1]", "[d0_0,2,2,d1_3]");
set_op({{1, 2, 1, 1}}, {{1, 2, 1, 1}},
"VALID", "NHWC");
INFER_OK(op, "[1,4,4,1];[1,1,1,1]", "[d0_0,2,4,d1_3]");
set_op({{1, 2, 1, 1}}, {{1, 2, 2, 1}},
"VALID", "NHWC");
INFER_OK(op, "[1,4,4,1];[1,1,1,1]", "[d0_0,2,2,d1_3]");
set_op({{1, 2, 1, 1}}, {{1, 1, 1, 1}},
"VALID", "NHWC");
INFER_OK(op, "[1,5,5,1];[3,3,1,1]", "[d0_0,1,3,d1_3]");
set_op({{1, 2, 1, 1}}, {{1, 2, 1, 1}},
"VALID", "NHWC");
INFER_OK(op, "[1,5,5,1];[3,3,1,1]", "[d0_0,1,3,d1_3]");
set_op({{1, 1, 2, 1}}, {{1, 2, 2, 1}},
"VALID", "NHWC");
INFER_OK(op, "[1,5,5,1];[3,3,1,1]", "[d0_0,2,1,d1_3]");
set_op({{1, 1, 2, 1}}, {{1, 1, 1, 1}},
"VALID", "NCHW");
INFER_OK(op, "[1,1,2,2];[1,1,1,1]", "[d0_0,d1_3,2,2]");
set_op({{1, 1, 2, 1}}, {{1, 1, 2, 1}},
"VALID", "NCHW");
INFER_OK(op, "[1,1,4,4];[1,1,1,1]", "[d0_0,d1_3,2,4]");
set_op({{1, 1, 2, 1}}, {{1, 1, 2, 2}},
"VALID", "NCHW");
INFER_OK(op, "[1,1,4,4];[1,1,1,1]", "[d0_0,d1_3,2,2]");
set_op({{1, 1, 2, 1}}, {{1, 1, 1, 1}},
"VALID", "NCHW");
INFER_OK(op, "[1,1,5,5];[3,3,1,1]", "[d0_0,d1_3,1,3]");
set_op({{1, 1, 2, 1}}, {{1, 1, 2, 1}},
"VALID", "NCHW");
INFER_OK(op, "[1,1,5,5];[3,3,1,1]", "[d0_0,d1_3,1,3]");
set_op({{1, 1, 1, 2}}, {{1, 1, 2, 2}},
"VALID", "NCHW");
INFER_OK(op, "[1,1,5,5];[3,3,1,1]", "[d0_0,d1_3,2,1]");
set_op({{1, 2, 1, 1}}, {{1, 1, 1, 1}},
"SAME", "NHWC");
INFER_OK(op, "[1,4,4,1];[1,1,1,1]", "[d0_0,d0_1,d0_2,d1_3]");
set_op({{1, 2, 2, 1}}, {{1, 1, 1, 1}},
"SAME", "NHWC");
INFER_OK(op, "[1,3,3,1];[2,2,1,1]", "[d0_0,d0_1,d0_2,d1_3]");
set_op({{1, 1, 2, 1}}, {{1, 2, 2, 1}},
"SAME", "NHWC");
INFER_OK(op, "[1,4,4,1];[2,2,1,1]", "[d0_0,2,2,d1_3]");
set_op({{1, 2, 2, 1}}, {{1, 1, 1, 1}},
"SAME", "NHWC");
INFER_OK(op, "[1,4,4,1];[2,2,1,1]", "[d0_0,d0_1,d0_2,d1_3]");
set_op({{1, 2, 1, 1}}, {{1, 1, 1, 1}},
"EXPLICIT", "NHWC",
{0, 0, 0, 2, 1, 4, 0, 0});
INFER_OK(op, "[1,4,4,1];[1,1,1,1]", "[d0_0,6,9,d1_3]");
set_op({{1, 2, 2, 1}}, {{1, 1, 1, 1}},
"EXPLICIT", "NHWC",
{0, 0, 1, 0, 1, 2, 0, 0});
INFER_OK(op, "[1,3,3,1];[2,2,1,1]", "[d0_0,2,4,d1_3]");
set_op({{1, 1, 2, 1}}, {{1, 2, 2, 1}},
"EXPLICIT", "NHWC",
{0, 0, 3, 2, 1, 0, 0, 0});
INFER_OK(op, "[1,4,4,1];[2,2,1,1]", "[d0_0,4,2,d1_3]");
set_op({{1, 2, 2, 1}}, {{1, 1, 1, 1}},
"EXPLICIT", "NHWC",
{0, 0, 1, 1, 2, 2, 0, 0});
INFER_OK(op, "[1,4,4,1];[2,2,1,1]", "[d0_0,4,6,d1_3]");
}
TEST(CommonShapeFnsTest, Conv3DShapeRankTest) {
ShapeInferenceTestOp op("Conv3D");
INFER_ERROR("must be rank 5", op, "[4,4];[2,1,1,1]");
INFER_ERROR("must be rank 5", op, "[1,4,4,1];[2,1,1]");
}
TEST(CommonShapeFnsTest, Conv3DGroupsTest) {
ShapeInferenceTestOp op("Conv3D");
auto set_op = [&op](const std::vector<int32>& strides,
const string& padding) {
TF_CHECK_OK(NodeDefBuilder("test", "Conv3D")
.Input("input", 0, DT_FLOAT)
.Input("filter", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("padding", padding)
.Finalize(&op.node_def));
};
set_op({{1, 1, 1, 1, 1}}, "VALID");
INFER_ERROR(
"Depth of input (10) is not a multiple of input depth of filter (6)", op,
"[1,2,2,2,10];[1,1,1,6,20]");
INFER_ERROR(
"Depth of output (1) is not a multiple of the number of groups (2)", op,
"[1,2,2,2,10];[1,1,1,5,1]");
set_op({{1, 1, 1, 1, 1}}, "SAME");
INFER_OK(op, "[1,4,4,4,10];[2,2,2,5,2]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,4,4,10];[2,2,2,5,2]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,4,4,10];[2,2,2,1,10]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
}
INSTANTIATE_TEST_SUITE_P(CommonShapeFnsTest, Conv2DShapeTest,
::testing::Values("Conv2D", "Conv"));
class Conv3DShapeTest : public ::testing::TestWithParam<string> {};
TEST_P(Conv3DShapeTest, Conv3DShapeTest) {
const string op_name = GetParam();
ShapeInferenceTestOp op(op_name);
auto set_op = [&op](const std::vector<int32>& strides,
const string& padding) {
TF_CHECK_OK(NodeDefBuilder("test", op.name)
.Input("input", 0, DT_FLOAT)
.Input("filter", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("padding", padding)
.Finalize(&op.node_def));
};
set_op({{1, 1, 1, 0, 1}}, "VALID");
INFER_ERROR("must be > 0", op, "[1,2,2,2,1];[1,1,1,1,1]");
set_op({{1, 1, 1, 1, 1}}, "VALID");
INFER_OK(op, "[1,2,2,2,1];[1,1,1,1,1]", "[d0_0,2,2,2,d1_4]");
INFER_OK(op, "[1,2,2,2,1];[1,1,1,1,1]", "[d0_0,2,2,2,d1_4]");
INFER_OK(op, "[1,?,2,2,1];[1,1,1,1,1]", "[d0_0,?,2,2,d1_4]");
INFER_OK(op, "[1,2,?,2,1];[1,1,1,1,1]", "[d0_0,2,?,2,d1_4]");
INFER_OK(op, "[1,2,2,?,1];[1,1,1,1,1]", "[d0_0,2,2,?,d1_4]");
INFER_OK(op, "[1,2,2,2,1];[?,1,1,1,1]", "[d0_0,?,2,2,d1_4]");
INFER_OK(op, "[1,2,2,2,1];[1,?,1,1,1]", "[d0_0,2,?,2,d1_4]");
INFER_OK(op, "[1,2,2,2,1];[1,1,?,1,1]", "[d0_0,2,2,?,d1_4]");
INFER_OK(op, "[1,2,2,2,1];[1,1,1,?,1]", "[d0_0,2,2,2,d1_4]");
INFER_OK(op, "[1,2,2,2,1];[1,1,1,1,?]", "[d0_0,2,2,2,d1_4]");
set_op({{1, 1, 1, 1, 1}}, "VALID");
INFER_OK(op, "[1,2,2,2,1];[2,2,2,1,1]", "[d0_0,1,1,1,d1_4]");
set_op({{1, 2, 2, 2, 1}}, "VALID");
INFER_OK(op, "[1,3,3,3,1];[1,1,1,1,1]", "[d0_0,2,2,2,d1_4]");
set_op({{1, 2, 1, 1, 1}}, "VALID");
INFER_OK(op, "[1,3,3,3,1];[1,1,1,1,1]", "[d0_0,2,3,3,d1_4]");
set_op({{1, 1, 1, 1, 1}}, "SAME");
INFER_OK(op, "[1,4,4,4,1];[2,2,2,1,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
set_op({{1, 1, 1, 1, 1}}, "SAME");
INFER_OK(op, "[?,4,4,4,1];[2,2,2,1,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,?,4,4,1];[2,2,2,1,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,?,4,1];[2,2,2,1,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,4,?,1];[2,2,2,1,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,4,4,?];[2,2,2,1,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,4,4,1];[?,2,2,1,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,4,4,1];[2,?,2,1,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,4,4,1];[2,2,?,1,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,4,4,1];[2,2,2,?,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,4,4,1];[2,2,2,1,?]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
set_op({{1, 2, 3, 4, 1}}, "SAME");
INFER_OK(op, "[1,4,9,4,1];[2,2,2,1,1]", "[d0_0,2,3,1,d1_4]");
INFER_OK(op, "[?,4,9,4,1];[2,2,2,1,1]", "[d0_0,2,3,1,d1_4]");
INFER_OK(op, "[1,?,9,4,1];[2,2,2,1,1]", "[d0_0,?,3,1,d1_4]");
INFER_OK(op, "[1,4,?,4,1];[2,2,2,1,1]", "[d0_0,2,?,1,d1_4]");
INFER_OK(op, "[1,4,9,?,1];[2,2,2,1,1]", "[d0_0,2,3,?,d1_4]");
INFER_OK(op, "[1,4,9,4,?];[2,2,2,1,1]", "[d0_0,2,3,1,d1_4]");
INFER_OK(op, "[1,4,9,4,1];[?,2,2,1,1]", "[d0_0,2,3,1,d1_4]");
INFER_OK(op, "[1,4,9,4,1];[2,?,2,1,1]", "[d0_0,2,3,1,d1_4]");
INFER_OK(op, "[1,4,9,4,1];[2,2,?,1,1]", "[d0_0,2,3,1,d1_4]");
INFER_OK(op, "[1,4,9,4,1];[2,2,2,?,1]", "[d0_0,2,3,1,d1_4]");
INFER_OK(op, "[1,4,9,4,1];[2,2,2,1,?]", "[d0_0,2,3,1,d1_4]");
}
TEST_P(Conv3DShapeTest, Conv3DDilatedShapeTest) {
const string op_name = GetParam();
ShapeInferenceTestOp op(op_name);
auto set_op = [&op](const std::vector<int32>& dilations,
const std::vector<int32>& strides,
const string& padding) {
TF_CHECK_OK(NodeDefBuilder("test", op.name)
.Input("input", 0, DT_FLOAT)
.Input("filter", 0, DT_FLOAT)
.Attr("dilations", dilations)
.Attr("strides", strides)
.Attr("padding", padding)
.Finalize(&op.node_def));
};
set_op({{1, 2, 1, 1}}, {{1, 1, 1, 1, 1}},
"VALID");
INFER_ERROR("contain 5 values", op, "[1,2,2,2,1];[1,1,1,1,1]");
set_op({{1, 2, 0, 1, 1}}, {{1, 1, 1, 1, 1}},
"VALID");
INFER_ERROR("must be >= 1", op, "[1,2,2,2,1];[1,1,1,1,1]");
set_op({{1, 2, 1, 1, 1}}, {{1, 1, 1, 1, 1}},
"VALID");
INFER_OK(op, "[1,2,2,2,1];[1,1,1,1,1]", "[d0_0,2,2,2,d1_4]");
set_op({{1, 2, 1, 1, 1}}, {{1, 1, 1, 1, 1}},
"VALID");
INFER_OK(op, "[1,3,2,2,1];[2,2,2,1,1]", "[d0_0,1,1,1,d1_4]");
set_op({{1, 2, 1, 1, 1}}, {{1, 2, 2, 2, 1}},
"VALID");
INFER_OK(op, "[1,3,3,3,1];[1,1,1,1,1]", "[d0_0,2,2,2,d1_4]");
set_op({{1, 2, 1, 1, 1}}, {{1, 2, 1, 1, 1}},
"VALID");
INFER_OK(op, "[1,3,3,3,1];[1,1,1,1,1]", "[d0_0,2,3,3,d1_4]");
set_op({{1, 2, 1, 1, 1}}, {{1, 1, 1, 1, 1}},
"SAME");
INFER_OK(op, "[1,4,4,4,1];[2,2,2,1,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
}
INSTANTIATE_TEST_SUITE_P(CommonShapeFnsTest, Conv3DShapeTest,
::testing::Values("Conv3D", "Conv"));
TEST(CommonShapeFnsTest, DepthwiseConv2DShapeTest) {
ShapeInferenceTestOp op("DepthwiseConv2dNative");
std::vector<int32> strides = {{1, 1, 1, 1}};
TF_CHECK_OK(NodeDefBuilder("test", "DepthwiseConv2dNative")
.Input("input", 0, DT_FLOAT)
.Input("filter", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("padding", "VALID")
.Attr("data_format", "NHWC")
.Finalize(&op.node_def));
INFER_OK(op, "[1,2,2,3];[1,1,3,4]", "[d0_0,2,2,12]");
INFER_ERROR("Dimensions must be equal, but are 3 and 12", op,
"[1,2,2,3];[1,1,12,4]");
INFER_OK(op, "[1,2,2,3];[1,1,3,4]", "[d0_0,2,2,12]");
INFER_OK(op, "[1,?,2,3];[1,1,3,4]", "[d0_0,?,2,12]");
INFER_OK(op, "[1,2,?,3];[1,1,3,4]", "[d0_0,2,?,12]");
INFER_OK(op, "[1,2,2,3];[?,1,3,4]", "[d0_0,?,2,12]");
INFER_OK(op, "[1,2,2,3];[1,?,3,4]", "[d0_0,2,?,12]");
INFER_OK(op, "[1,2,2,3];[1,1,?,4]", "[d0_0,2,2,12]");
INFER_OK(op, "[1,2,2,?];[1,1,?,4]", "[d0_0,2,2,?]");
INFER_OK(op, "[1,2,2,3];[1,1,3,?]", "[d0_0,2,2,?]");
TF_CHECK_OK(NodeDefBuilder("test", "DepthwiseConv2dNative")
.Input("input", 0, DT_FLOAT)
.Input("filter", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("padding", "VALID")
.Attr("data_format", "NCHW")
.Finalize(&op.node_def));
INFER_OK(op, "[1,3,2,2];[1,1,3,4]", "[d0_0,12,2,2]");
}
TEST(CommonShapeFnsTest, AvgPool2DShapeTest) {
ShapeInferenceTestOp op("AvgPool");
auto set_op = [&op](const std::vector<int32>& strides,
const std::vector<int32>& ksizes, const string& padding,
const string& data_format) {
TF_CHECK_OK(NodeDefBuilder("test", "AvgPool")
.Input("input", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("ksize", ksizes)
.Attr("padding", padding)
.Attr("data_format", data_format)
.Finalize(&op.node_def));
};
set_op({1, 1, 1, 1}, {1, 1, 1, 1}, "VALID", "NHWC");
INFER_OK(op, "[1,2,2,1]", "[d0_0,2,2,d0_3]");
set_op({1, 1, 2, 1}, {1, 2, 1, 1}, "VALID", "NHWC");
INFER_OK(op, "[1,4,4,1]", "[d0_0,3,2,d0_3]");
INFER_OK(op, "[1,?,4,1]", "[d0_0,?,2,d0_3]");
INFER_OK(op, "[1,4,?,1]", "[d0_0,3,?,d0_3]");
set_op({{1, 1, 1, 2}}, {1, 1, 2, 1}, "VALID", "NCHW");
INFER_OK(op, "[1,1,4,4]", "[d0_0,d0_1,3,2]");
set_op({{1, 1, 1, 1}}, {1, 1, 2, 2}, "VALID", "NCHW_VECT_C");
INFER_OK(op, "[2,3,5,7,4]", "[d0_0,d0_1,4,6,4]");
INFER_OK(op, "[5,7,?,?,4]", "[d0_0,d0_1,?,?,4]");
INFER_OK(op, "[?,?,?,?,4]", "[d0_0,d0_1,?,?,4]");
INFER_ERROR("must be 4 or 32, but is 3", op, "[2,5,7,11,3]");
INFER_ERROR("Shape must be rank", op, "[4,4]");
}
TEST(CommonShapeFnsTest, MaxPool2DShapeTest) {
ShapeInferenceTestOp op("MaxPool");
auto set_op = [&op](const std::vector<int32>& strides,
const std::vector<int32>& ksizes, const string& padding,
const string& data_format) {
TF_CHECK_OK(NodeDefBuilder("test", "MaxPool")
.Input("input", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("ksize", ksizes)
.Attr("padding", padding)
.Attr("data_format", data_format)
.Finalize(&op.node_def));
};
set_op({1, 1, 1, 1}, {1, 1, 1, 2}, "VALID", "NHWC");
INFER_OK(op, "[1,2,2,2]", "[d0_0,2,2,1]");
set_op({1, 3, 1, 1}, {1, 1, 1, 1}, "VALID", "NCHW");
INFER_OK(op, "[1,7,5,5]", "[d0_0,3,5,5]");
set_op({{1, 1, 1, 1}}, {1, 1, 2, 2}, "SAME", "NCHW_VECT_C");
INFER_OK(op, "[2,3,5,7,4]", "[d0_0,d0_1,d0_2,d0_3,4]");
INFER_OK(op, "[5,7,?,?,4]", "[d0_0,d0_1,d0_2,d0_3,4]");
INFER_OK(op, "[?,?,?,?,4]", "[d0_0,d0_1,d0_2,d0_3,4]");
INFER_ERROR("must be 4 or 32, but is 8", op, "[2,3,5,7,8]");
}
TEST(CommonShapeFnsTest, MaxPoolV22DShapeTest) {
ShapeInferenceTestOp op("MaxPoolV2");
Tensor ksizes_tensor, strides_tensor;
auto set_op = [&op, &ksizes_tensor, &strides_tensor](
const std::vector<int32>& strides,
const std::vector<int32>& ksizes, const string& padding,
const string& data_format) {
TF_CHECK_OK(NodeDefBuilder("test", "MaxPoolV2")
.Input("input", 0, DT_FLOAT)
.Input("ksize", 1, DT_INT32)
.Input("strides", 2, DT_INT32)
.Attr("padding", padding)
.Attr("data_format", data_format)
.Finalize(&op.node_def));
ksizes_tensor = test::AsTensor<int32>(ksizes);
op.input_tensors.resize(3);
op.input_tensors[0] = nullptr;
op.input_tensors[1] = &ksizes_tensor;
strides_tensor = test::AsTensor<int32>(strides);
op.input_tensors[2] = &strides_tensor;
};
set_op({1, 1, 1, 1}, {1, 1, 1, 2}, "VALID", "NHWC");
INFER_OK(op, "[1,2,2,2];[4];[4]", "[d0_0,2,2,1]");
set_op({1, 3, 1, 1}, {1, 1, 1, 1}, "VALID", "NCHW");
INFER_OK(op, "[1,7,5,5];[4];[4]", "[d0_0,3,5,5]");
set_op({{1, 1, 1, 1}}, {1, 1, 2, 2}, "SAME", "NCHW_VECT_C");
INFER_OK(op, "[2,3,5,7,4];[4];[4]", "[d0_0,d0_1,d0_2,d0_3,4]");
INFER_OK(op, "[5,7,?,?,4];[4];[4]", "[d0_0,d0_1,d0_2,d0_3,4]");
INFER_OK(op, "[?,?,?,?,4];[4];[4]", "[d0_0,d0_1,d0_2,d0_3,4]");
INFER_ERROR("must be 4 or 32, but is 8", op, "[2,3,5,7,8];[4];[4]");
}
TEST(CommonShapeFnsTest, Pool3DShapeTest) {
ShapeInferenceTestOp op("MaxPool3D");
auto set_op = [&op](const std::vector<int32>& strides,
const std::vector<int32>& ksizes, const string& padding) {
TF_CHECK_OK(NodeDefBuilder("test", "MaxPool3D")
.Input("input", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("ksize", ksizes)
.Attr("padding", padding)
.Finalize(&op.node_def));
};
set_op({1, 2, 3, 4, 1}, {1, 1, 1, 1, 1}, "VALID");
INFER_OK(op, "[1,24,24,24,1]", "[d0_0,12,8,6,d0_4]");
set_op({1, 1, 3, 4, 1}, {1, 1, 1, 1, 1}, "VALID");
INFER_OK(op, "[1,?,24,24,1]", "[d0_0,?,8,6,d0_4]");
}
TEST(CommonShapeFnsTest, UnknownShapeTest) {
{
ShapeInferenceTestOp op("QueueDequeue");
TF_CHECK_OK(NodeDefBuilder("test", "QueueDequeue")
.Input("handle", 0, DT_STRING_REF)
.Attr("component_types", {DT_FLOAT})
.Finalize(&op.node_def));
INFER_OK(op, "[1]", "?");
}
{
ShapeInferenceTestOp op("QueueDequeue");
TF_CHECK_OK(NodeDefBuilder("test", "QueueDequeue")
.Input("handle", 0, DT_STRING_REF)
.Attr("component_types", {DT_FLOAT, DT_FLOAT, DT_STRING})
.Finalize(&op.node_def));
INFER_OK(op, "[1]", "?;?;?");
}
}
TEST(CommonShapeFnsTest, Reduce_ShapeFn) {
ShapeInferenceTestOp op("Sum");
op.input_tensors.resize(2);
TF_ASSERT_OK(NodeDefBuilder("test", "Sum")
.Input("input", 0, DT_FLOAT)
.Input("reduction_indices", 1, DT_INT32)
.Attr("keep_dims", false)
.Finalize(&op.node_def));
INFER_OK(op, "[2,4,5];[2]", "?");
INFER_OK(op, "?;[2]", "?");
Tensor indices = test::AsTensor<int32>({1, 2});
op.input_tensors[1] = &indices;
INFER_OK(op, "[2,4,5];[2]", "[d0_0]");
indices = test::AsTensor<int32>({-1, -2});
op.input_tensors[1] = &indices;
INFER_OK(op, "[2,4,5];[2]", "[d0_0]");
indices = test::AsScalar<int32>(0);
op.input_tensors[1] = &indices;
INFER_OK(op, "[2,4,5];[]", "[d0_1,d0_2]");
indices = test::AsScalar<int32>(-4);
op.input_tensors[1] = &indices;
INFER_ERROR("Invalid reduction dimension", op, "[2,4,5];[]");
indices = test::AsTensor<int32>({});
op.input_tensors[1] = &indices;
INFER_OK(op, "[2,4,5];[0]", "[d0_0,d0_1,d0_2]");
TF_ASSERT_OK(NodeDefBuilder("test", "Sum")
.Input("input", 0, DT_FLOAT)
.Input("reduction_indices", 1, DT_INT32)
.Attr("keep_dims", true)
.Finalize(&op.node_def));
indices = test::AsTensor<int32>({-1, -2});
op.input_tensors[1] = &indices;
INFER_OK(op, "[2,4,5];[2]", "[d0_0, 1, 1]");
op.input_tensors[1] = nullptr;
INFER_OK(op, "[?,?,?];?", "[?,?,?]");
INFER_OK(op, "[?,?,?];[2]", "[?,?,?]");
INFER_ERROR("must be at most rank 1 but is rank 2", op, "[?,?,?];[?,?]");
op.graph_def_version = 20;
INFER_OK(op, "[?,?,?];[?,?]", "[?,?,?]");
op.input_tensors[1] = &indices;
indices = test::AsTensor<int32>({-1, -2}, TensorShape({2, 1}));
INFER_OK(op, "[2,4,5];[2,1]", "[d0_0, 1, 1]");
indices = test::AsTensor<int32>({-1, -2}, TensorShape({1, 2}));
INFER_OK(op, "[2,4,5];[1,2]", "[d0_0, 1, 1]");
}
TEST(CommonShapeFnsTest, ValidateSparseTensor_UnknownShapes) {
NodeDef def;
InferenceContext c(TF_GRAPH_DEF_VERSION, def, MakeOpDef(3, 1),
{Unknown(), Unknown(), Unknown()}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
auto indices = c.input(0);
auto values = c.input(1);
auto shape = c.input(2);
TF_EXPECT_OK(ValidateSparseTensor(&c, indices, values, shape));
}
TEST(CommonShapeFnsTest, ValidateSparseTensor_UnknownDims) {
NodeDef def;
InferenceContext c(TF_GRAPH_DEF_VERSION, def, MakeOpDef(3, 1),
{S({-1, -1}), S({-1}), S({-1})}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
auto indices = c.input(0);
auto values = c.input(1);
auto shape = c.input(2);
TF_EXPECT_OK(ValidateSparseTensor(&c, indices, values, shape));
}
TEST(CommonShapeFnsTest, ValidateSparseTensor_InvalidIndicesRank) {
NodeDef def;
InferenceContext c(TF_GRAPH_DEF_VERSION, def, MakeOpDef(3, 1),
{S({-1}), S({-1}), S({-1})}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
auto indices = c.input(0);
auto values = c.input(1);
auto shape = c.input(2);
EXPECT_EQ(error::INVALID_ARGUMENT,
ValidateSparseTensor(&c, indices, values, shape).code());
}
TEST(CommonShapeFnsTest, ValidateSparseTensor_InvalidNumElements) {
NodeDef def;
InferenceContext c(TF_GRAPH_DEF_VERSION, def, MakeOpDef(3, 1),
{S({5, 3}), S({4}), S({3})}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
auto indices = c.input(0);
auto values = c.input(1);
auto shape = c.input(2);
EXPECT_EQ(error::INVALID_ARGUMENT,
ValidateSparseTensor(&c, indices, values, shape).code());
}
TEST(CommonShapeFnsTest, ValidateSparseTensor_InvalidRank) {
NodeDef def;
InferenceContext c(TF_GRAPH_DEF_VERSION, def, MakeOpDef(3, 1),
{S({5, 3}), S({5}), S({4})}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
auto indices = c.input(0);
auto values = c.input(1);
auto shape = c.input(2);
EXPECT_EQ(error::INVALID_ARGUMENT,
ValidateSparseTensor(&c, indices, values, shape).code());
}
TEST(CommonShapeFnsTest, ValidateSparseTensor_UnknownNumIndexElements) {
NodeDef def;
InferenceContext c(TF_GRAPH_DEF_VERSION, def, MakeOpDef(3, 1),
{S({-1, 3}), S({5}), S({3})}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
auto indices = c.input(0);
auto values = c.input(1);
auto shape = c.input(2);
TF_EXPECT_OK(ValidateSparseTensor(&c, indices, values, shape));
}
TEST(CommonShapeFnsTest, ValidateSparseTensor_UnknownNumValueElements) {
NodeDef def;
InferenceContext c(TF_GRAPH_DEF_VERSION, def, MakeOpDef(3, 1),
{S({5, 3}), S({-1}), S({3})}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
auto indices = c.input(0);
auto values = c.input(1);
auto shape = c.input(2);
TF_EXPECT_OK(ValidateSparseTensor(&c, indices, values, shape));
}
TEST(CommonShapeFnsTest, ValidateSparseTensor_UnknownIndexRank) {
NodeDef def;
InferenceContext c(TF_GRAPH_DEF_VERSION, def, MakeOpDef(3, 1),
{S({5, -1}), S({5}), S({3})}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
auto indices = c.input(0);
auto values = c.input(1);
auto shape = c.input(2);
TF_EXPECT_OK(ValidateSparseTensor(&c, indices, values, shape));
}
TEST(CommonShapeFnsTest, ValidateSparseTensor_UnknownShapeRank) {
NodeDef def;
InferenceContext c(TF_GRAPH_DEF_VERSION, def, MakeOpDef(3, 1),
{S({5, 3}), S({5}), S({-1})}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
auto indices = c.input(0);
auto values = c.input(1);
auto shape = c.input(2);
TF_EXPECT_OK(ValidateSparseTensor(&c, indices, values, shape));
}
TEST(CommonShapeFnsTest, ValidateSparseTensor) {
NodeDef def;
InferenceContext c(TF_GRAPH_DEF_VERSION, def, MakeOpDef(3, 1),
{S({5, 3}), S({5}), S({3})}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
auto indices = c.input(0);
auto values = c.input(1);
auto shape = c.input(2);
TF_EXPECT_OK(ValidateSparseTensor(&c, indices, values, shape));
}
TEST(CommonShapeFnsTest, ReduceScatterSuccess) {
OpRegistrationData op_reg_data;
TF_CHECK_OK(OpDefBuilder("XlaReduceScatter")
.Input("input: float")
.Input("group_assignment: int32")
.Input("scatter_dimension: int32")
.Output("output: float")
.Finalize(&op_reg_data));
OpDef op_def = op_reg_data.op_def;
NodeDef def;
TF_CHECK_OK(NodeDefBuilder("test", "XlaReduceScatter")
.Input("input", 0, DT_FLOAT)
.Input("group_assignment", 0, DT_INT32)
.Input("scatter_dimension", 0, DT_INT32)
.Finalize(&def));
const Tensor scatter_dimension = Tensor(0);
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({2, 2}), S({1, 2}), S({1})},
{nullptr, nullptr, &scatter_dimension}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
TF_EXPECT_OK(ReduceScatterShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(1, c.Value(c.Dim(output, 0)));
EXPECT_EQ(2, c.Value(c.Dim(output, 1)));
}
TEST(CommonShapeFnsTest, ReduceScatter_MissingScatterDimension) {
OpRegistrationData op_reg_data;
TF_CHECK_OK(OpDefBuilder("XlaReduceScatter")
.Input("input: float")
.Input("group_assignment: int32")
.Input("scatter_dimension: int32")
.Output("output: float")
.Finalize(&op_reg_data));
OpDef op_def = op_reg_data.op_def;
NodeDef def;
TF_CHECK_OK(NodeDefBuilder("test", "XlaReduceScatter")
.Input("input", 0, DT_FLOAT)
.Input("group_assignment", 0, DT_INT32)
.Input("scatter_dimension", 0, DT_INT32)
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({2, 2}), S({1, 2}), S({1})}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
TF_EXPECT_OK(ReduceScatterShape(&c));
ShapeHandle output = c.output(0);
EXPECT_FALSE(c.ValueKnown(c.Dim(output, 0)));
EXPECT_FALSE(c.ValueKnown(c.Dim(output, 1)));
}
TEST(CommonShapeFnsTest, ReduceScatter_NotEvenlyDivisible) {
OpRegistrationData op_reg_data;
TF_CHECK_OK(OpDefBuilder("XlaReduceScatter")
.Input("input: float")
.Input("group_assignment: int32")
.Input("scatter_dimension: int32")
.Output("output: float")
.Finalize(&op_reg_data));
OpDef op_def = op_reg_data.op_def;
NodeDef def;
TF_CHECK_OK(NodeDefBuilder("test", "XlaReduceScatter")
.Input("input", 0, DT_FLOAT)
.Input("group_assignment", 0, DT_INT32)
.Input("scatter_dimension", 0, DT_INT32)
.Finalize(&def));
const Tensor scatter_dimension = Tensor(0);
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({3, 3}), S({1, 2}), S({1})},
{nullptr, nullptr, &scatter_dimension}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
EXPECT_THAT(ReduceScatterShape(&c),
tensorflow::testing::StatusIs(
error::INVALID_ARGUMENT,
"Dimension size must be evenly divisible by 2 but is 3"));
}
TEST(CommonShapeFnsTest, ReduceScatter_INVALID_GROUP_ASSIGNMENT) {
OpRegistrationData op_reg_data;
TF_CHECK_OK(OpDefBuilder("XlaReduceScatter")
.Input("input: float")
.Input("group_assignment: int32")
.Input("scatter_dimension: int32")
.Output("output: float")
.Finalize(&op_reg_data));
OpDef op_def = op_reg_data.op_def;
NodeDef def;
TF_CHECK_OK(NodeDefBuilder("test", "XlaReduceScatter")
.Input("input", 0, DT_FLOAT)
.Input("group_assignment", 0, DT_INT32)
.Input("scatter_dimension", 0, DT_INT32)
.Finalize(&def));
const Tensor scatter_dimension = Tensor(0);
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({3, 3}), S({2}), S({1})},
{nullptr, nullptr, &scatter_dimension}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
EXPECT_THAT(ReduceScatterShape(&c),
tensorflow::testing::StatusIs(
error::INVALID_ARGUMENT,
"ReduceScatter group_assignment should be rank 2"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/common_shape_fns.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/common_shape_fns_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c62d5172-4d32-4cae-98d8-797551044cf6 | cpp | google/libaddressinput | lookup_key | cpp/src/lookup_key.cc | cpp/test/lookup_key_test.cc | #include "lookup_key.h"
#include <libaddressinput/address_data.h>
#include <libaddressinput/address_field.h>
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <functional>
#include <string>
#include "language.h"
#include "region_data_constants.h"
#include "rule.h"
#include "util/cctype_tolower_equal.h"
#include "util/size.h"
namespace i18n {
namespace addressinput {
namespace {
const char kSlashDelim[] = "/";
const char kDashDelim[] = "--";
const char kData[] = "data";
const char kUnknown[] = "ZZ";
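// Returns true only if |region_code| has sub-region data and |language_tag|
// is one of the region's alternate (non-default) languages, i.e. the lookup
// key needs an explicit "--<language>" suffix. The comparison against the
// alternate languages is case-insensitive.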
bool ShouldSetLanguageForKey(const std::string& language_tag,
const std::string& region_code) {
if (RegionDataConstants::GetMaxLookupKeyDepth(region_code) == 0) {
return false;
}
Rule rule;
rule.CopyFrom(Rule::GetDefault());
if (!rule.ParseSerializedRule(
RegionDataConstants::GetRegionData(region_code))) {
return false;
}
const auto& languages = rule.GetLanguages();
if (languages.empty() || languages[0] == language_tag) {
return false;
}
using std::placeholders::_1;
return std::find_if(languages.begin() + 1, languages.end(),
std::bind(&EqualToTolowerString, _1, language_tag)) !=
languages.end();
}
}
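// The order in which address fields are appended to a lookup key, from most
// general (COUNTRY) to most specific (DEPENDENT_LOCALITY).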
const AddressField LookupKey::kHierarchy[] = {
COUNTRY,
ADMIN_AREA,
LOCALITY,
DEPENDENT_LOCALITY,
};
LookupKey::LookupKey() = default;
LookupKey::~LookupKey() = default;
void LookupKey::FromAddress(const AddressData& address) {
nodes_.clear();
if (address.region_code.empty()) {
nodes_.emplace(COUNTRY, kUnknown);
} else {
for (AddressField field : kHierarchy) {
if (address.IsFieldEmpty(field)) {
break;
}
const std::string& value = address.GetFieldValue(field);
if (value.find('/') != std::string::npos) {
break;
}
nodes_.emplace(field, value);
}
}
Language address_language(address.language_code);
std::string language_tag_no_latn = address_language.has_latin_script
? address_language.base
: address_language.tag;
if (ShouldSetLanguageForKey(language_tag_no_latn, address.region_code)) {
language_ = language_tag_no_latn;
}
}
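// Extends |parent|'s key by one hierarchy level, using |child_node| as the
// value for the next address field.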
void LookupKey::FromLookupKey(const LookupKey& parent,
const std::string& child_node) {
assert(parent.nodes_.size() < size(kHierarchy));
assert(!child_node.empty());
if (this != &parent) nodes_ = parent.nodes_;
AddressField child_field = kHierarchy[nodes_.size()];
nodes_.emplace(child_field, child_node);
}
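// Builds the key string "data/<COUNTRY>[/<ADMIN_AREA>[/...]]" down to
// |max_depth|, stopping at the first missing field and appending
// "--<language>" when an alternate language was set.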
std::string LookupKey::ToKeyString(size_t max_depth) const {
assert(max_depth < size(kHierarchy));
std::string key_string(kData);
for (size_t i = 0; i <= max_depth; ++i) {
AddressField field = kHierarchy[i];
auto it = nodes_.find(field);
if (it == nodes_.end()) {
break;
}
key_string.append(kSlashDelim);
key_string.append(it->second);
}
if (!language_.empty()) {
key_string.append(kDashDelim);
key_string.append(language_);
}
return key_string;
}
const std::string& LookupKey::GetRegionCode() const {
auto it = nodes_.find(COUNTRY);
assert(it != nodes_.end());
return it->second;
}
size_t LookupKey::GetDepth() const {
size_t depth = nodes_.size() - 1;
assert(depth < size(kHierarchy));
return depth;
}
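// Usage sketch (illustrative only; it mirrors the expectations encoded in
// the unit tests bundled below rather than adding new behavior):
//
//   AddressData address;
//   address.region_code = "CA";
//   address.administrative_area = "ON";
//   address.language_code = "fr";
//
//   LookupKey key;
//   key.FromAddress(address);
//   // key.GetDepth() == 1
//   // key.ToKeyString(1) == "data/CA/ON--fr"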
}  // namespace addressinput
}  // namespace i18n | #include "lookup_key.h"
#include <libaddressinput/address_data.h>
#include <cstddef>
#include <gtest/gtest.h>
#include "util/size.h"
namespace {
using i18n::addressinput::AddressData;
using i18n::addressinput::LookupKey;
const size_t kMaxDepth = size(LookupKey::kHierarchy) - 1;
TEST(LookupKeyTest, Empty) {
const AddressData address;
LookupKey lookup_key;
lookup_key.FromAddress(address);
EXPECT_EQ("data/ZZ", lookup_key.ToKeyString(kMaxDepth));
}
TEST(LookupKeyTest, AddressDepth1) {
const AddressData address{.region_code = "111"};
LookupKey lookup_key;
lookup_key.FromAddress(address);
EXPECT_EQ(0, lookup_key.GetDepth());
EXPECT_EQ("data/111", lookup_key.ToKeyString(kMaxDepth));
}
TEST(LookupKeyTest, AddressDepth2) {
const AddressData address{
.region_code = "111",
.administrative_area = "222",
};
LookupKey lookup_key;
lookup_key.FromAddress(address);
EXPECT_EQ(1, lookup_key.GetDepth());
EXPECT_EQ("data/111/222", lookup_key.ToKeyString(kMaxDepth));
}
TEST(LookupKeyTest, AddressDepth3) {
const AddressData address{
.region_code = "111",
.administrative_area = "222",
.locality = "333",
};
LookupKey lookup_key;
lookup_key.FromAddress(address);
EXPECT_EQ(2, lookup_key.GetDepth());
EXPECT_EQ("data/111/222/333", lookup_key.ToKeyString(kMaxDepth));
}
TEST(LookupKeyTest, AddressDepth4) {
const AddressData address{
.region_code = "111",
.administrative_area = "222",
.locality = "333",
.dependent_locality = "444",
};
LookupKey lookup_key;
lookup_key.FromAddress(address);
EXPECT_EQ(3, lookup_key.GetDepth());
EXPECT_EQ("data/111/222/333/444", lookup_key.ToKeyString(kMaxDepth));
}
TEST(LookupKeyTest, AddressDepthNonContiguous) {
const AddressData address{
.region_code = "111",
.administrative_area = "222",
.dependent_locality = "444",
};
LookupKey lookup_key;
lookup_key.FromAddress(address);
EXPECT_EQ(1, lookup_key.GetDepth());
EXPECT_EQ("data/111/222", lookup_key.ToKeyString(kMaxDepth));
}
TEST(LookupKeyTest, AddressDepthTerminateOnSlash) {
const AddressData address{
.region_code = "111",
.administrative_area = "222",
.locality = "3/3",
.dependent_locality = "444",
};
LookupKey lookup_key;
lookup_key.FromAddress(address);
EXPECT_EQ(1, lookup_key.GetDepth());
EXPECT_EQ("data/111/222", lookup_key.ToKeyString(kMaxDepth));
}
TEST(LookupKeyTest, RequestDepth) {
const AddressData address{
.region_code = "111",
.administrative_area = "222",
.locality = "333",
.dependent_locality = "444",
};
LookupKey lookup_key;
lookup_key.FromAddress(address);
EXPECT_EQ("data/111", lookup_key.ToKeyString(0));
EXPECT_EQ("data/111/222", lookup_key.ToKeyString(1));
EXPECT_EQ("data/111/222/333", lookup_key.ToKeyString(2));
EXPECT_EQ("data/111/222/333/444", lookup_key.ToKeyString(3));
}
TEST(LookupKeyTest, WithLanguageCodeDefaultLanguage) {
const AddressData address{
.region_code = "CA",
.administrative_area = "ON",
.language_code = "en",
};
LookupKey lookup_key;
lookup_key.FromAddress(address);
EXPECT_EQ("data/CA", lookup_key.ToKeyString(0));
EXPECT_EQ("data/CA/ON", lookup_key.ToKeyString(1));
}
TEST(LookupKeyTest, WithLanguageCodeAlternateLanguage) {
const AddressData address{
.region_code = "CA",
.administrative_area = "ON",
.language_code = "fr",
};
LookupKey lookup_key;
lookup_key.FromAddress(address);
EXPECT_EQ("data/CA--fr", lookup_key.ToKeyString(0));
EXPECT_EQ("data/CA/ON--fr", lookup_key.ToKeyString(1));
}
TEST(LookupKeyTest, WithLanguageCodeInvalidLanguage) {
const AddressData address{
.region_code = "CA",
.administrative_area = "ON",
.language_code = "de",
};
LookupKey lookup_key;
lookup_key.FromAddress(address);
EXPECT_EQ("data/CA", lookup_key.ToKeyString(0));
EXPECT_EQ("data/CA/ON", lookup_key.ToKeyString(1));
}
TEST(LookupKeyTest, WithLanguageCodeAlternateLanguageNoState) {
const AddressData address{
.region_code = "AF",
.language_code = "ps",
};
LookupKey lookup_key;
lookup_key.FromAddress(address);
EXPECT_EQ("data/AF", lookup_key.ToKeyString(0));
}
TEST(LookupKeyTest, GetRegionCode) {
const AddressData address{.region_code = "rrr"};
LookupKey lookup_key;
lookup_key.FromAddress(address);
EXPECT_EQ(address.region_code, lookup_key.GetRegionCode());
}
TEST(LookupKeyTest, FromAddressClearsExistingNodes) {
AddressData address{
.region_code = "111",
.administrative_area = "222",
};
LookupKey lookup_key;
lookup_key.FromAddress(address);
EXPECT_EQ("data/111/222", lookup_key.ToKeyString(kMaxDepth));
address.administrative_area.clear();
lookup_key.FromAddress(address);
EXPECT_EQ("data/111", lookup_key.ToKeyString(kMaxDepth));
}
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/lookup_key.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/lookup_key_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |
4640a7a9-8921-4f09-a4bb-3add88d26430 | cpp | abseil/abseil-cpp | any_invocable | absl/functional/internal/any_invocable.h | absl/functional/any_invocable_test.cc | #ifndef ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_
#define ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_
#include <cassert>
#include <cstddef>
#include <cstring>
#include <exception>
#include <functional>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/invoke.h"
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
#include "absl/meta/type_traits.h"
#include "absl/utility/utility.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex) noexcept(noex)
#else
#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex)
#endif
template <class Sig>
class AnyInvocable;
namespace internal_any_invocable {
enum StorageProperty : std::size_t {
kAlignment = alignof(std::max_align_t),
kStorageSize = sizeof(void*) * 2
};
template <class T>
struct IsAnyInvocable : std::false_type {};
template <class Sig>
struct IsAnyInvocable<AnyInvocable<Sig>> : std::true_type {};
template <class T>
using IsStoredLocally = std::integral_constant<
bool, sizeof(T) <= kStorageSize && alignof(T) <= kAlignment &&
kAlignment % alignof(T) == 0 &&
std::is_nothrow_move_constructible<T>::value>;
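// Added commentary: a target is stored inline only if it fits in
// kStorageSize, its alignment divides kAlignment, and its move constructor
// cannot throw. On common platforms a plain function pointer qualifies, so
// a sketch such as
//
//   static_assert(IsStoredLocally<int (*)(int)>::value, "");
//
// would be expected to hold, while over-aligned or throwing-move types fall
// back to heap allocation.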
template <class T>
using RemoveCVRef =
typename std::remove_cv<typename std::remove_reference<T>::type>::type;
template <class ReturnType, class F, class... P,
typename = absl::enable_if_t<std::is_void<ReturnType>::value>>
void InvokeR(F&& f, P&&... args) {
absl::base_internal::invoke(std::forward<F>(f), std::forward<P>(args)...);
}
template <class ReturnType, class F, class... P,
absl::enable_if_t<!std::is_void<ReturnType>::value, int> = 0>
ReturnType InvokeR(F&& f, P&&... args) {
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
return absl::base_internal::invoke(std::forward<F>(f),
std::forward<P>(args)...);
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
#pragma GCC diagnostic pop
#endif
}
template <typename T>
T ForwardImpl(std::true_type);
template <typename T>
T&& ForwardImpl(std::false_type);
template <class T>
struct ForwardedParameter {
using type = decltype((
ForwardImpl<T>)(std::integral_constant<bool,
std::is_scalar<T>::value>()));
};
template <class T>
using ForwardedParameterType = typename ForwardedParameter<T>::type;
enum class FunctionToCall : bool { relocate_from_to, dispose };
union TypeErasedState {
struct {
void* target;
std::size_t size;
} remote;
alignas(kAlignment) char storage[kStorageSize];
};
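// Added commentary: TypeErasedState implements the small-object
// optimization. Targets satisfying IsStoredLocally are constructed directly
// in `storage`; all others are heap-allocated, with the pointer and the
// allocation size kept in `remote` so sized deallocation can be used where
// available.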
template <class T>
T& ObjectInLocalStorage(TypeErasedState* const state) {
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606L
return *std::launder(reinterpret_cast<T*>(&state->storage));
#elif ABSL_HAVE_BUILTIN(__builtin_launder)
return *__builtin_launder(reinterpret_cast<T*>(&state->storage));
#else
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif
return *reinterpret_cast<T*>(&state->storage);
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
#endif
}
using ManagerType = void(FunctionToCall /*operation*/,
                         TypeErasedState* /*from*/, TypeErasedState* /*to*/)
ABSL_INTERNAL_NOEXCEPT_SPEC(true);
template <bool SigIsNoexcept, class ReturnType, class... P>
using InvokerType = ReturnType(TypeErasedState*, ForwardedParameterType<P>...)
ABSL_INTERNAL_NOEXCEPT_SPEC(SigIsNoexcept);
inline void EmptyManager(FunctionToCall /*operation*/,
                         TypeErasedState* /*from*/,
                         TypeErasedState* /*to*/) noexcept {}
inline void LocalManagerTrivial(FunctionToCall /*operation*/,
TypeErasedState* const from,
TypeErasedState* const to) noexcept {
*to = *from;
}
template <class T>
void LocalManagerNontrivial(FunctionToCall operation,
TypeErasedState* const from,
TypeErasedState* const to) noexcept {
static_assert(IsStoredLocally<T>::value,
"Local storage must only be used for supported types.");
  static_assert(!std::is_trivially_copyable<T>::value,
                "This manager must not be used for trivially copyable types; "
                "those are handled by LocalManagerTrivial.");
T& from_object = (ObjectInLocalStorage<T>)(from);
switch (operation) {
case FunctionToCall::relocate_from_to:
::new (static_cast<void*>(&to->storage)) T(std::move(from_object));
ABSL_FALLTHROUGH_INTENDED;
case FunctionToCall::dispose:
from_object.~T();
return;
}
ABSL_UNREACHABLE();
}
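// Added commentary: managers implement a two-operation protocol.
// relocate_from_to move-constructs the target into `to` and then falls
// through to destroy the moved-from object in `from`; dispose only runs the
// destructor. Trivially copyable targets bypass this via LocalManagerTrivial,
// which simply copies the raw state.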
template <bool SigIsNoexcept, class ReturnType, class QualTRef, class... P>
ReturnType LocalInvoker(
TypeErasedState* const state,
ForwardedParameterType<P>... args) noexcept(SigIsNoexcept) {
using RawT = RemoveCVRef<QualTRef>;
static_assert(
IsStoredLocally<RawT>::value,
"Target object must be in local storage in order to be invoked from it.");
auto& f = (ObjectInLocalStorage<RawT>)(state);
return (InvokeR<ReturnType>)(static_cast<QualTRef>(f),
static_cast<ForwardedParameterType<P>>(args)...);
}
inline void RemoteManagerTrivial(FunctionToCall operation,
TypeErasedState* const from,
TypeErasedState* const to) noexcept {
switch (operation) {
case FunctionToCall::relocate_from_to:
to->remote = from->remote;
return;
case FunctionToCall::dispose:
#if defined(__cpp_sized_deallocation)
::operator delete(from->remote.target, from->remote.size);
#else
::operator delete(from->remote.target);
#endif
return;
}
ABSL_UNREACHABLE();
}
template <class T>
void RemoteManagerNontrivial(FunctionToCall operation,
TypeErasedState* const from,
TypeErasedState* const to) noexcept {
static_assert(!IsStoredLocally<T>::value,
"Remote storage must only be used for types that do not "
"qualify for local storage.");
switch (operation) {
case FunctionToCall::relocate_from_to:
to->remote.target = from->remote.target;
return;
case FunctionToCall::dispose:
::delete static_cast<T*>(from->remote.target);
return;
}
ABSL_UNREACHABLE();
}
template <bool SigIsNoexcept, class ReturnType, class QualTRef, class... P>
ReturnType RemoteInvoker(
TypeErasedState* const state,
ForwardedParameterType<P>... args) noexcept(SigIsNoexcept) {
using RawT = RemoveCVRef<QualTRef>;
static_assert(!IsStoredLocally<RawT>::value,
"Target object must be in remote storage in order to be "
"invoked from it.");
auto& f = *static_cast<RawT*>(state->remote.target);
return (InvokeR<ReturnType>)(static_cast<QualTRef>(f),
static_cast<ForwardedParameterType<P>>(args)...);
}
template <class T>
struct IsInPlaceType : std::false_type {};
template <class T>
struct IsInPlaceType<absl::in_place_type_t<T>> : std::true_type {};
template <class QualDecayedTRef>
struct TypedConversionConstruct {};
template <class Sig>
class Impl {};
#if defined(__cpp_sized_deallocation)
class TrivialDeleter {
public:
explicit TrivialDeleter(std::size_t size) : size_(size) {}
void operator()(void* target) const {
::operator delete(target, size_);
}
private:
std::size_t size_;
};
#else
class TrivialDeleter {
public:
explicit TrivialDeleter(std::size_t) {}
void operator()(void* target) const { ::operator delete(target); }
};
#endif
template <bool SigIsNoexcept, class ReturnType, class... P>
class CoreImpl;
constexpr bool IsCompatibleConversion(void*, void*) { return false; }
template <bool NoExceptSrc, bool NoExceptDest, class... T>
constexpr bool IsCompatibleConversion(CoreImpl<NoExceptSrc, T...>*,
CoreImpl<NoExceptDest, T...>*) {
return !NoExceptDest || NoExceptSrc;
}
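// Added commentary: conversions between AnyInvocable specializations are
// one-directional with respect to noexcept (`!NoExceptDest || NoExceptSrc`).
// A hedged sketch of the intended effect, assuming noexcept is part of the
// type system (C++17):
//
//   absl::AnyInvocable<int(int) noexcept> src = ...;
//   absl::AnyInvocable<int(int)> dst = std::move(src);  // OK
//   // the reverse direction is not constructible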
template <bool SigIsNoexcept, class ReturnType, class... P>
class CoreImpl {
public:
using result_type = ReturnType;
CoreImpl() noexcept : manager_(EmptyManager), invoker_(nullptr) {}
enum class TargetType {
kPointer,
kCompatibleAnyInvocable,
kIncompatibleAnyInvocable,
kOther,
};
template <class QualDecayedTRef, class F>
explicit CoreImpl(TypedConversionConstruct<QualDecayedTRef>, F&& f) {
using DecayedT = RemoveCVRef<QualDecayedTRef>;
constexpr TargetType kTargetType =
(std::is_pointer<DecayedT>::value ||
std::is_member_pointer<DecayedT>::value)
? TargetType::kPointer
: IsCompatibleAnyInvocable<DecayedT>::value
? TargetType::kCompatibleAnyInvocable
: IsAnyInvocable<DecayedT>::value
? TargetType::kIncompatibleAnyInvocable
: TargetType::kOther;
Initialize<kTargetType, QualDecayedTRef>(std::forward<F>(f));
}
template <class QualTRef, class... Args>
explicit CoreImpl(absl::in_place_type_t<QualTRef>, Args&&... args) {
InitializeStorage<QualTRef>(std::forward<Args>(args)...);
}
CoreImpl(CoreImpl&& other) noexcept {
other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_);
manager_ = other.manager_;
invoker_ = other.invoker_;
other.manager_ = EmptyManager;
other.invoker_ = nullptr;
}
CoreImpl& operator=(CoreImpl&& other) noexcept {
Clear();
other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_);
manager_ = other.manager_;
invoker_ = other.invoker_;
other.manager_ = EmptyManager;
other.invoker_ = nullptr;
return *this;
}
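  // Added commentary: because assignment first calls Clear() and only then
  // relocates from `other`, self-move-assignment is safe but leaves the
  // object empty; the SelfMoveAssign tests below rely on it being
  // well-defined rather than on any particular resulting state.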
~CoreImpl() { manager_(FunctionToCall::dispose, &state_, &state_); }
bool HasValue() const { return invoker_ != nullptr; }
void Clear() {
manager_(FunctionToCall::dispose, &state_, &state_);
manager_ = EmptyManager;
invoker_ = nullptr;
}
template <TargetType target_type, class QualDecayedTRef, class F,
absl::enable_if_t<target_type == TargetType::kPointer, int> = 0>
void Initialize(F&& f) {
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Waddress"
#pragma GCC diagnostic ignored "-Wnonnull-compare"
#endif
if (static_cast<RemoveCVRef<QualDecayedTRef>>(f) == nullptr) {
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
manager_ = EmptyManager;
invoker_ = nullptr;
return;
}
InitializeStorage<QualDecayedTRef>(std::forward<F>(f));
}
template <TargetType target_type, class QualDecayedTRef, class F,
absl::enable_if_t<
target_type == TargetType::kCompatibleAnyInvocable, int> = 0>
void Initialize(F&& f) {
f.manager_(FunctionToCall::relocate_from_to, &f.state_, &state_);
manager_ = f.manager_;
invoker_ = f.invoker_;
f.manager_ = EmptyManager;
f.invoker_ = nullptr;
}
template <TargetType target_type, class QualDecayedTRef, class F,
absl::enable_if_t<
target_type == TargetType::kIncompatibleAnyInvocable, int> = 0>
void Initialize(F&& f) {
if (f.HasValue()) {
InitializeStorage<QualDecayedTRef>(std::forward<F>(f));
} else {
manager_ = EmptyManager;
invoker_ = nullptr;
}
}
template <TargetType target_type, class QualDecayedTRef, class F,
typename = absl::enable_if_t<target_type == TargetType::kOther>>
void Initialize(F&& f) {
InitializeStorage<QualDecayedTRef>(std::forward<F>(f));
}
template <class QualTRef, class... Args,
typename = absl::enable_if_t<
IsStoredLocally<RemoveCVRef<QualTRef>>::value>>
void InitializeStorage(Args&&... args) {
using RawT = RemoveCVRef<QualTRef>;
::new (static_cast<void*>(&state_.storage))
RawT(std::forward<Args>(args)...);
invoker_ = LocalInvoker<SigIsNoexcept, ReturnType, QualTRef, P...>;
InitializeLocalManager<RawT>();
}
template <class QualTRef, class... Args,
absl::enable_if_t<!IsStoredLocally<RemoveCVRef<QualTRef>>::value,
int> = 0>
void InitializeStorage(Args&&... args) {
InitializeRemoteManager<RemoveCVRef<QualTRef>>(std::forward<Args>(args)...);
invoker_ = RemoteInvoker<SigIsNoexcept, ReturnType, QualTRef, P...>;
}
template <class T,
typename = absl::enable_if_t<std::is_trivially_copyable<T>::value>>
void InitializeLocalManager() {
manager_ = LocalManagerTrivial;
}
template <class T,
absl::enable_if_t<!std::is_trivially_copyable<T>::value, int> = 0>
void InitializeLocalManager() {
manager_ = LocalManagerNontrivial<T>;
}
template <class T>
using HasTrivialRemoteStorage =
std::integral_constant<bool, std::is_trivially_destructible<T>::value &&
alignof(T) <=
ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT>;
template <class T, class... Args,
typename = absl::enable_if_t<HasTrivialRemoteStorage<T>::value>>
void InitializeRemoteManager(Args&&... args) {
std::unique_ptr<void, TrivialDeleter> uninitialized_target(
::operator new(sizeof(T)), TrivialDeleter(sizeof(T)));
::new (uninitialized_target.get()) T(std::forward<Args>(args)...);
state_.remote.target = uninitialized_target.release();
state_.remote.size = sizeof(T);
manager_ = RemoteManagerTrivial;
}
template <class T, class... Args,
absl::enable_if_t<!HasTrivialRemoteStorage<T>::value, int> = 0>
void InitializeRemoteManager(Args&&... args) {
state_.remote.target = ::new T(std::forward<Args>(args)...);
manager_ = RemoteManagerNontrivial<T>;
}
template <typename Other>
struct IsCompatibleAnyInvocable {
static constexpr bool value = false;
};
template <typename Sig>
struct IsCompatibleAnyInvocable<AnyInvocable<Sig>> {
static constexpr bool value =
(IsCompatibleConversion)(static_cast<
typename AnyInvocable<Sig>::CoreImpl*>(
nullptr),
static_cast<CoreImpl*>(nullptr));
};
TypeErasedState state_;
ManagerType* manager_;
InvokerType<SigIsNoexcept, ReturnType, P...>* invoker_;
};
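// Public-facing usage sketch (illustrative; this internal machinery
// underlies absl::AnyInvocable):
//
//   absl::AnyInvocable<int(int) &&> f = [](int x) { return x + 1; };
//   int y = std::move(f)(41);  // rvalue-qualified signature: call via move
//
// A second call through an rvalue-qualified wrapper is diagnosed in debug
// builds (see InvokedAfterMove below and the NonConstCrashesOnSecondCall
// test).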
struct ConversionConstruct {};
template <class T>
struct UnwrapStdReferenceWrapperImpl {
using type = T;
};
template <class T>
struct UnwrapStdReferenceWrapperImpl<std::reference_wrapper<T>> {
using type = T&;
};
template <class T>
using UnwrapStdReferenceWrapper =
typename UnwrapStdReferenceWrapperImpl<T>::type;
template <class... T>
using TrueAlias =
std::integral_constant<bool, sizeof(absl::void_t<T...>*) != 0>;
template <class Sig, class F,
class = absl::enable_if_t<
!std::is_same<RemoveCVRef<F>, AnyInvocable<Sig>>::value>>
using CanConvert = TrueAlias<
absl::enable_if_t<!IsInPlaceType<RemoveCVRef<F>>::value>,
absl::enable_if_t<Impl<Sig>::template CallIsValid<F>::value>,
absl::enable_if_t<
Impl<Sig>::template CallIsNoexceptIfSigIsNoexcept<F>::value>,
absl::enable_if_t<std::is_constructible<absl::decay_t<F>, F>::value>>;
template <class Sig, class F, class... Args>
using CanEmplace = TrueAlias<
absl::enable_if_t<Impl<Sig>::template CallIsValid<F>::value>,
absl::enable_if_t<
Impl<Sig>::template CallIsNoexceptIfSigIsNoexcept<F>::value>,
absl::enable_if_t<std::is_constructible<absl::decay_t<F>, Args...>::value>>;
template <class Sig, class F,
class = absl::enable_if_t<
!std::is_same<RemoveCVRef<F>, AnyInvocable<Sig>>::value>>
using CanAssign = TrueAlias<
absl::enable_if_t<Impl<Sig>::template CallIsValid<F>::value>,
absl::enable_if_t<
Impl<Sig>::template CallIsNoexceptIfSigIsNoexcept<F>::value>,
absl::enable_if_t<std::is_constructible<absl::decay_t<F>, F>::value>>;
template <class Sig, class F>
using CanAssignReferenceWrapper = TrueAlias<
absl::enable_if_t<
Impl<Sig>::template CallIsValid<std::reference_wrapper<F>>::value>,
absl::enable_if_t<Impl<Sig>::template CallIsNoexceptIfSigIsNoexcept<
std::reference_wrapper<F>>::value>>;
#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT(inv_quals, noex) \
ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_##noex(inv_quals)
#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_true(inv_quals) \
absl::enable_if_t<absl::disjunction< \
std::is_nothrow_invocable_r< \
ReturnType, UnwrapStdReferenceWrapper<absl::decay_t<F>> inv_quals, \
P...>, \
std::conjunction< \
std::is_nothrow_invocable< \
UnwrapStdReferenceWrapper<absl::decay_t<F>> inv_quals, P...>, \
std::is_same< \
ReturnType, \
absl::base_internal::invoke_result_t< \
UnwrapStdReferenceWrapper<absl::decay_t<F>> inv_quals, \
P...>>>>::value>
#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_false(inv_quals)
#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, noex) \
template <class ReturnType, class... P> \
class Impl<ReturnType(P...) cv ref ABSL_INTERNAL_NOEXCEPT_SPEC(noex)> \
: public CoreImpl<noex, ReturnType, P...> { \
public: \
using Core = CoreImpl<noex, ReturnType, P...>; \
template <class F> \
using CallIsValid = TrueAlias<absl::enable_if_t<absl::disjunction< \
absl::base_internal::is_invocable_r<ReturnType, \
absl::decay_t<F> inv_quals, P...>, \
std::is_same<ReturnType, \
absl::base_internal::invoke_result_t< \
absl::decay_t<F> inv_quals, P...>>>::value>>; \
template <class F> \
using CallIsNoexceptIfSigIsNoexcept = \
TrueAlias<ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT(inv_quals, \
noex)>; \
Impl() = default; \
template <class F> \
explicit Impl(ConversionConstruct, F&& f) \
: Core(TypedConversionConstruct< \
typename std::decay<F>::type inv_quals>(), \
std::forward<F>(f)) {} \
template <class T, class... Args> \
explicit Impl(absl::in_place_type_t<T>, Args&&... args) \
: Core(absl::in_place_type<absl::decay_t<T> inv_quals>, \
std::forward<Args>(args)...) {} \
static ReturnType InvokedAfterMove( \
TypeErasedState*, \
ForwardedParameterType<P>...) noexcept(noex) { \
ABSL_HARDENING_ASSERT(false && "AnyInvocable use-after-move"); \
std::terminate(); \
} \
InvokerType<noex, ReturnType, P...>* ExtractInvoker() cv { \
using QualifiedTestType = int cv ref; \
auto* invoker = this->invoker_; \
if (!std::is_const<QualifiedTestType>::value && \
std::is_rvalue_reference<QualifiedTestType>::value) { \
ABSL_ASSERT([this]() { \
const_cast<Impl*>(this)->invoker_ = InvokedAfterMove; \
return this->HasValue(); \
}()); \
} \
return invoker; \
} \
ReturnType operator()(P... args) cv ref noexcept(noex) { \
assert(this->invoker_ != nullptr); \
return this->ExtractInvoker()( \
const_cast<TypeErasedState*>(&this->state_), \
static_cast<ForwardedParameterType<P>>(args)...); \
} \
}
#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL(cv, ref, inv_quals) \
ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, false); \
ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, true)
#else
#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL(cv, ref, inv_quals) \
ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, false)
#endif
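// Added commentary: each expansion below stamps out the Impl partial
// specialization for one cv/ref-qualified signature; the third macro
// argument is the qualifier set applied to the wrapped target when it is
// invoked. Under C++17 each line additionally generates the noexcept flavor
// of the same signature.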
ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, , &);
ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, , const&);
ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, &, &);
ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, &, const&);
ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, &&, &&);
ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, &&, const&&);
#undef ABSL_INTERNAL_ANY_INVOCABLE_IMPL
#undef ABSL_INTERNAL_ANY_INVOCABLE_IMPL_
#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_false
#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_true
#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT
#undef ABSL_INTERNAL_NOEXCEPT_SPEC
}  // namespace internal_any_invocable
ABSL_NAMESPACE_END
}  // namespace absl
#endif  // ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_ | #include "absl/functional/any_invocable.h"
#include <cstddef>
#include <initializer_list>
#include <memory>
#include <numeric>
#include <type_traits>
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/meta/type_traits.h"
#include "absl/utility/utility.h"
static_assert(absl::internal_any_invocable::kStorageSize >= sizeof(void*),
"These tests assume that the small object storage is at least "
"the size of a pointer.");
namespace {
#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex) noexcept(noex)
#else
#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex)
#endif
struct _ {};
template <class T>
struct Wrapper {
template <class U,
class = absl::enable_if_t<std::is_convertible<U, T>::value>>
Wrapper(U&&);
};
static_assert(std::is_constructible<Wrapper<absl::AnyInvocable<void()>>,
Wrapper<absl::AnyInvocable<void()>>>::value,
"");
template <class Qualifiers, class This>
struct QualifiersForThisImpl {
static_assert(std::is_object<This>::value, "");
using type =
absl::conditional_t<std::is_const<Qualifiers>::value, const This, This>&;
};
template <class Qualifiers, class This>
struct QualifiersForThisImpl<Qualifiers&, This>
: QualifiersForThisImpl<Qualifiers, This> {};
template <class Qualifiers, class This>
struct QualifiersForThisImpl<Qualifiers&&, This> {
static_assert(std::is_object<This>::value, "");
using type =
absl::conditional_t<std::is_const<Qualifiers>::value, const This, This>&&;
};
template <class Qualifiers, class This>
using QualifiersForThis =
typename QualifiersForThisImpl<Qualifiers, This>::type;
template <class T, class Fun>
struct GiveQualifiersToFunImpl;
template <class T, class R, class... P>
struct GiveQualifiersToFunImpl<T, R(P...)> {
using type =
absl::conditional_t<std::is_const<T>::value, R(P...) const, R(P...)>;
};
template <class T, class R, class... P>
struct GiveQualifiersToFunImpl<T&, R(P...)> {
using type =
absl::conditional_t<std::is_const<T>::value, R(P...) const&, R(P...)&>;
};
template <class T, class R, class... P>
struct GiveQualifiersToFunImpl<T&&, R(P...)> {
using type =
absl::conditional_t<std::is_const<T>::value, R(P...) const&&, R(P...) &&>;
};
#if defined(__cpp_noexcept_function_type)
template <class T, class R, class... P>
struct GiveQualifiersToFunImpl<T, R(P...) noexcept> {
using type = absl::conditional_t<std::is_const<T>::value,
R(P...) const noexcept, R(P...) noexcept>;
};
template <class T, class R, class... P>
struct GiveQualifiersToFunImpl<T&, R(P...) noexcept> {
using type =
absl::conditional_t<std::is_const<T>::value, R(P...) const & noexcept,
R(P...) & noexcept>;
};
template <class T, class R, class... P>
struct GiveQualifiersToFunImpl<T&&, R(P...) noexcept> {
using type =
absl::conditional_t<std::is_const<T>::value, R(P...) const && noexcept,
R(P...) && noexcept>;
};
#endif
template <class T, class Fun>
using GiveQualifiersToFun = typename GiveQualifiersToFunImpl<T, Fun>::type;
enum class ObjSize { small, large };
template <ObjSize Size>
struct TypeErasedPadding;
template <>
struct TypeErasedPadding<ObjSize::small> {};
template <>
struct TypeErasedPadding<ObjSize::large> {
char dummy_data[absl::internal_any_invocable::kStorageSize + 1] = {};
};
struct Int {
Int(int v) noexcept : value(v) {}
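  // Added commentary: this move constructor is intended never to execute
  // (the tests construct Int as a prvalue whose move is elided), so abort()
  // flags any unexpected call. MSVC builds use a defaulted move instead.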
#ifndef _MSC_VER
Int(Int&&) noexcept {
std::abort();
}
#else
Int(Int&& v) noexcept = default;
#endif
operator int() && noexcept { return value; }
int MemberFunctionAdd(int const& b, int c) noexcept {
return value + b + c;
}
int value;
};
enum class Movable { no, yes, nothrow, trivial };
enum class NothrowCall { no, yes };
enum class Destructible { nothrow, trivial };
enum class ObjAlign : std::size_t {
normal = absl::internal_any_invocable::kAlignment,
large = absl::internal_any_invocable::kAlignment * 2,
};
template <Movable Movability, Destructible Destructibility, class Qual,
NothrowCall CallExceptionSpec, ObjSize Size, ObjAlign Alignment>
struct add;
#define ABSL_INTERNALS_ADD(qual) \
template <NothrowCall CallExceptionSpec, ObjSize Size, ObjAlign Alignment> \
struct alignas(static_cast<std::size_t>(Alignment)) \
add<Movable::trivial, Destructible::trivial, _ qual, CallExceptionSpec, \
Size, Alignment> : TypeErasedPadding<Size> { \
explicit add(int state_init) : state(state_init) {} \
explicit add(std::initializer_list<int> state_init, int tail) \
: state(std::accumulate(std::begin(state_init), std::end(state_init), \
0) + \
tail) {} \
add(add&& other) = default; \
Int operator()(int a, int b, int c) qual \
ABSL_INTERNAL_NOEXCEPT_SPEC(CallExceptionSpec == NothrowCall::yes) { \
return state + a + b + c; \
} \
int state; \
}; \
\
template <NothrowCall CallExceptionSpec, ObjSize Size, ObjAlign Alignment> \
struct alignas(static_cast<std::size_t>(Alignment)) \
add<Movable::trivial, Destructible::nothrow, _ qual, CallExceptionSpec, \
Size, Alignment> : TypeErasedPadding<Size> { \
explicit add(int state_init) : state(state_init) {} \
explicit add(std::initializer_list<int> state_init, int tail) \
: state(std::accumulate(std::begin(state_init), std::end(state_init), \
0) + \
tail) {} \
~add() noexcept {} \
add(add&& other) = default; \
Int operator()(int a, int b, int c) qual \
ABSL_INTERNAL_NOEXCEPT_SPEC(CallExceptionSpec == NothrowCall::yes) { \
return state + a + b + c; \
} \
int state; \
}
#define ABSL_INTERNALS_NOARG
ABSL_INTERNALS_ADD(ABSL_INTERNALS_NOARG);
#undef ABSL_INTERNALS_NOARG
ABSL_INTERNALS_ADD(const);
ABSL_INTERNALS_ADD(&);
ABSL_INTERNALS_ADD(const&);
ABSL_INTERNALS_ADD(&&);
ABSL_INTERNALS_ADD(const&&);
#undef ABSL_INTERNALS_ADD
template <Destructible Destructibility, class Qual,
NothrowCall CallExceptionSpec, ObjSize Size, ObjAlign Alignment>
struct add<Movable::no, Destructibility, Qual, CallExceptionSpec, Size,
Alignment> : private add<Movable::trivial, Destructibility, Qual,
CallExceptionSpec, Size, Alignment> {
using Base = add<Movable::trivial, Destructibility, Qual, CallExceptionSpec,
Size, Alignment>;
explicit add(int state_init) : Base(state_init) {}
explicit add(std::initializer_list<int> state_init, int tail)
: Base(state_init, tail) {}
add(add&&) = delete;
using Base::operator();
using Base::state;
};
template <Destructible Destructibility, class Qual,
NothrowCall CallExceptionSpec, ObjSize Size, ObjAlign Alignment>
struct add<Movable::yes, Destructibility, Qual, CallExceptionSpec, Size,
Alignment> : private add<Movable::trivial, Destructibility, Qual,
CallExceptionSpec, Size, Alignment> {
using Base = add<Movable::trivial, Destructibility, Qual, CallExceptionSpec,
Size, Alignment>;
explicit add(int state_init) : Base(state_init) {}
explicit add(std::initializer_list<int> state_init, int tail)
: Base(state_init, tail) {}
add(add&& other) noexcept(false) : Base(other.state) {}
using Base::operator();
using Base::state;
};
template <Destructible Destructibility, class Qual,
NothrowCall CallExceptionSpec, ObjSize Size, ObjAlign Alignment>
struct add<Movable::nothrow, Destructibility, Qual, CallExceptionSpec, Size,
Alignment> : private add<Movable::trivial, Destructibility, Qual,
CallExceptionSpec, Size, Alignment> {
using Base = add<Movable::trivial, Destructibility, Qual, CallExceptionSpec,
Size, Alignment>;
explicit add(int state_init) : Base(state_init) {}
explicit add(std::initializer_list<int> state_init, int tail)
: Base(state_init, tail) {}
add(add&& other) noexcept : Base(other.state) {}
using Base::operator();
using Base::state;
};
Int add_function(Int&& a, int b, int c) noexcept { return a.value + b + c; }
Int mult_function(Int&& a, int b, int c) noexcept { return a.value * b * c; }
Int square_function(Int const&& a) noexcept { return a.value * a.value; }
template <class Sig>
using AnyInvocable = absl::AnyInvocable<Sig>;
template <Movable Movability, Destructible Destructibility, class Qual,
NothrowCall CallExceptionSpec, ObjSize Size, ObjAlign Alignment>
struct TestParams {
static constexpr Movable kMovability = Movability;
static constexpr Destructible kDestructibility = Destructibility;
using Qualifiers = Qual;
static constexpr NothrowCall kCallExceptionSpec = CallExceptionSpec;
static constexpr bool kIsNoexcept = kCallExceptionSpec == NothrowCall::yes;
static constexpr bool kIsRvalueQualified =
std::is_rvalue_reference<Qual>::value;
static constexpr ObjSize kSize = Size;
static constexpr ObjAlign kAlignment = Alignment;
using UnqualifiedUnaryFunType = int(Int const&&)
ABSL_INTERNAL_NOEXCEPT_SPEC(CallExceptionSpec == NothrowCall::yes);
using UnaryFunType = GiveQualifiersToFun<Qualifiers, UnqualifiedUnaryFunType>;
using MemObjPtrType = int(Int::*);
using UnaryAnyInvType = AnyInvocable<UnaryFunType>;
using UnaryThisParamType = QualifiersForThis<Qualifiers, UnaryAnyInvType>;
template <class T>
static UnaryThisParamType ToUnaryThisParam(T&& fun) {
return static_cast<UnaryThisParamType>(fun);
}
using ResultType = Int;
using AnyInvocableFunTypeNotNoexcept = Int(Int, const int&, int);
using UnqualifiedFunType =
typename std::conditional<kIsNoexcept, Int(Int, const int&, int) noexcept,
Int(Int, const int&, int)>::type;
using FunType = GiveQualifiersToFun<Qualifiers, UnqualifiedFunType>;
using MemFunPtrType =
typename std::conditional<kIsNoexcept,
Int (Int::*)(const int&, int) noexcept,
Int (Int::*)(const int&, int)>::type;
using AnyInvType = AnyInvocable<FunType>;
using AddType = add<kMovability, kDestructibility, Qualifiers,
kCallExceptionSpec, kSize, kAlignment>;
using ThisParamType = QualifiersForThis<Qualifiers, AnyInvType>;
template <class T>
static ThisParamType ToThisParam(T&& fun) {
return static_cast<ThisParamType>(fun);
}
using UnqualifiedVoidFunType =
typename std::conditional<kIsNoexcept,
void(Int, const int&, int) noexcept,
void(Int, const int&, int)>::type;
using VoidFunType = GiveQualifiersToFun<Qualifiers, UnqualifiedVoidFunType>;
using VoidAnyInvType = AnyInvocable<VoidFunType>;
using VoidThisParamType = QualifiersForThis<Qualifiers, VoidAnyInvType>;
template <class T>
static VoidThisParamType ToVoidThisParam(T&& fun) {
return static_cast<VoidThisParamType>(fun);
}
using CompatibleAnyInvocableFunType =
absl::conditional_t<std::is_rvalue_reference<Qual>::value,
GiveQualifiersToFun<const _&&, UnqualifiedFunType>,
GiveQualifiersToFun<const _&, UnqualifiedFunType>>;
using CompatibleAnyInvType = AnyInvocable<CompatibleAnyInvocableFunType>;
using IncompatibleInvocable =
absl::conditional_t<std::is_rvalue_reference<Qual>::value,
GiveQualifiersToFun<_&, UnqualifiedFunType>(_::*),
GiveQualifiersToFun<_&&, UnqualifiedFunType>(_::*)>;
};
template <class MemberPtrType>
struct MemberTypeOfImpl;
template <class Class, class T>
struct MemberTypeOfImpl<T(Class::*)> {
using type = T;
};
template <class MemberPtrType>
using MemberTypeOf = typename MemberTypeOfImpl<MemberPtrType>::type;
template <class T, class = void>
struct IsMemberSwappableImpl : std::false_type {
static constexpr bool kIsNothrow = false;
};
template <class T>
struct IsMemberSwappableImpl<
T, absl::void_t<decltype(std::declval<T&>().swap(std::declval<T&>()))>>
: std::true_type {
static constexpr bool kIsNothrow =
noexcept(std::declval<T&>().swap(std::declval<T&>()));
};
template <class T>
using IsMemberSwappable = IsMemberSwappableImpl<T>;
template <class T>
using IsNothrowMemberSwappable =
std::integral_constant<bool, IsMemberSwappableImpl<T>::kIsNothrow>;
template <class T>
class AnyInvTestBasic : public ::testing::Test {};
TYPED_TEST_SUITE_P(AnyInvTestBasic);
TYPED_TEST_P(AnyInvTestBasic, DefaultConstruction) {
using AnyInvType = typename TypeParam::AnyInvType;
AnyInvType fun;
EXPECT_FALSE(static_cast<bool>(fun));
EXPECT_TRUE(std::is_nothrow_default_constructible<AnyInvType>::value);
}
TYPED_TEST_P(AnyInvTestBasic, ConstructionNullptr) {
using AnyInvType = typename TypeParam::AnyInvType;
AnyInvType fun = nullptr;
EXPECT_FALSE(static_cast<bool>(fun));
EXPECT_TRUE(
(std::is_nothrow_constructible<AnyInvType, std::nullptr_t>::value));
}
TYPED_TEST_P(AnyInvTestBasic, ConstructionNullFunctionPtr) {
using AnyInvType = typename TypeParam::AnyInvType;
using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType;
UnqualifiedFunType* const null_fun_ptr = nullptr;
AnyInvType fun = null_fun_ptr;
EXPECT_FALSE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestBasic, ConstructionNullMemberFunctionPtr) {
using AnyInvType = typename TypeParam::AnyInvType;
using MemFunPtrType = typename TypeParam::MemFunPtrType;
const MemFunPtrType null_mem_fun_ptr = nullptr;
AnyInvType fun = null_mem_fun_ptr;
EXPECT_FALSE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestBasic, ConstructionNullMemberObjectPtr) {
using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
using MemObjPtrType = typename TypeParam::MemObjPtrType;
const MemObjPtrType null_mem_obj_ptr = nullptr;
UnaryAnyInvType fun = null_mem_obj_ptr;
EXPECT_FALSE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestBasic, ConstructionMemberFunctionPtr) {
using AnyInvType = typename TypeParam::AnyInvType;
AnyInvType fun = &Int::MemberFunctionAdd;
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
}
TYPED_TEST_P(AnyInvTestBasic, ConstructionMemberObjectPtr) {
using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
UnaryAnyInvType fun = &Int::value;
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(13, TypeParam::ToUnaryThisParam(fun)(13));
}
TYPED_TEST_P(AnyInvTestBasic, ConstructionFunctionReferenceDecay) {
using AnyInvType = typename TypeParam::AnyInvType;
AnyInvType fun = add_function;
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
}
TYPED_TEST_P(AnyInvTestBasic, ConstructionCompatibleAnyInvocableEmpty) {
using AnyInvType = typename TypeParam::AnyInvType;
using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType;
CompatibleAnyInvType other;
AnyInvType fun = std::move(other);
EXPECT_FALSE(static_cast<bool>(other));
EXPECT_EQ(other, nullptr);
EXPECT_EQ(nullptr, other);
EXPECT_FALSE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestBasic, ConstructionCompatibleAnyInvocableNonempty) {
using AnyInvType = typename TypeParam::AnyInvType;
using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType;
CompatibleAnyInvType other = &add_function;
AnyInvType fun = std::move(other);
EXPECT_FALSE(static_cast<bool>(other));
EXPECT_EQ(other, nullptr);
EXPECT_EQ(nullptr, other);
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
}
TYPED_TEST_P(AnyInvTestBasic, ConversionToBool) {
using AnyInvType = typename TypeParam::AnyInvType;
{
AnyInvType fun;
EXPECT_FALSE(fun ? true : false);
EXPECT_TRUE(
(std::is_nothrow_constructible<bool, const AnyInvType&>::value));
EXPECT_FALSE((std::is_convertible<const AnyInvType&, bool>::value));
}
{
AnyInvType fun = &add_function;
EXPECT_TRUE(fun ? true : false);
}
}
TYPED_TEST_P(AnyInvTestBasic, Invocation) {
using AnyInvType = typename TypeParam::AnyInvType;
using FunType = typename TypeParam::FunType;
using AnyInvCallType = MemberTypeOf<decltype(&AnyInvType::operator())>;
EXPECT_TRUE((std::is_same<AnyInvCallType, FunType>::value));
AnyInvType fun = &add_function;
EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
}
TYPED_TEST_P(AnyInvTestBasic, InPlaceConstruction) {
using AnyInvType = typename TypeParam::AnyInvType;
using AddType = typename TypeParam::AddType;
AnyInvType fun(absl::in_place_type<AddType>, 5);
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
}
TYPED_TEST_P(AnyInvTestBasic, InPlaceConstructionInitializerList) {
using AnyInvType = typename TypeParam::AnyInvType;
using AddType = typename TypeParam::AddType;
AnyInvType fun(absl::in_place_type<AddType>, {1, 2, 3, 4}, 5);
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(39, TypeParam::ToThisParam(fun)(7, 8, 9).value);
}
TYPED_TEST_P(AnyInvTestBasic, InPlaceNullFunPtrConstruction) {
using AnyInvType = typename TypeParam::AnyInvType;
using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType;
AnyInvType fun(absl::in_place_type<UnqualifiedFunType*>, nullptr);
EXPECT_TRUE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestBasic, InPlaceNullFunPtrConstructionValueInit) {
using AnyInvType = typename TypeParam::AnyInvType;
using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType;
AnyInvType fun(absl::in_place_type<UnqualifiedFunType*>);
EXPECT_TRUE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestBasic, InPlaceNullMemFunPtrConstruction) {
using AnyInvType = typename TypeParam::AnyInvType;
using MemFunPtrType = typename TypeParam::MemFunPtrType;
AnyInvType fun(absl::in_place_type<MemFunPtrType>, nullptr);
EXPECT_TRUE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestBasic, InPlaceNullMemFunPtrConstructionValueInit) {
using AnyInvType = typename TypeParam::AnyInvType;
using MemFunPtrType = typename TypeParam::MemFunPtrType;
AnyInvType fun(absl::in_place_type<MemFunPtrType>);
EXPECT_TRUE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestBasic, InPlaceNullMemObjPtrConstruction) {
using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
using MemObjPtrType = typename TypeParam::MemObjPtrType;
UnaryAnyInvType fun(absl::in_place_type<MemObjPtrType>, nullptr);
EXPECT_TRUE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestBasic, InPlaceNullMemObjPtrConstructionValueInit) {
using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
using MemObjPtrType = typename TypeParam::MemObjPtrType;
UnaryAnyInvType fun(absl::in_place_type<MemObjPtrType>);
EXPECT_TRUE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestBasic, InPlaceVoidCovarianceConstruction) {
using VoidAnyInvType = typename TypeParam::VoidAnyInvType;
using AddType = typename TypeParam::AddType;
VoidAnyInvType fun(absl::in_place_type<AddType>, 5);
EXPECT_TRUE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestBasic, MoveConstructionFromEmpty) {
using AnyInvType = typename TypeParam::AnyInvType;
AnyInvType source_fun;
AnyInvType fun(std::move(source_fun));
EXPECT_FALSE(static_cast<bool>(fun));
EXPECT_TRUE(std::is_nothrow_move_constructible<AnyInvType>::value);
}
TYPED_TEST_P(AnyInvTestBasic, MoveConstructionFromNonEmpty) {
using AnyInvType = typename TypeParam::AnyInvType;
using AddType = typename TypeParam::AddType;
AnyInvType source_fun(absl::in_place_type<AddType>, 5);
AnyInvType fun(std::move(source_fun));
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
EXPECT_TRUE(std::is_nothrow_move_constructible<AnyInvType>::value);
}
TYPED_TEST_P(AnyInvTestBasic, ComparisonWithNullptrEmpty) {
using AnyInvType = typename TypeParam::AnyInvType;
AnyInvType fun;
EXPECT_TRUE(fun == nullptr);
EXPECT_TRUE(nullptr == fun);
EXPECT_FALSE(fun != nullptr);
EXPECT_FALSE(nullptr != fun);
}
TYPED_TEST_P(AnyInvTestBasic, ComparisonWithNullptrNonempty) {
using AnyInvType = typename TypeParam::AnyInvType;
using AddType = typename TypeParam::AddType;
AnyInvType fun(absl::in_place_type<AddType>, 5);
EXPECT_FALSE(fun == nullptr);
EXPECT_FALSE(nullptr == fun);
EXPECT_TRUE(fun != nullptr);
EXPECT_TRUE(nullptr != fun);
}
TYPED_TEST_P(AnyInvTestBasic, ResultType) {
using AnyInvType = typename TypeParam::AnyInvType;
using ExpectedResultType = typename TypeParam::ResultType;
EXPECT_TRUE((std::is_same<typename AnyInvType::result_type,
ExpectedResultType>::value));
}
template <class T>
class AnyInvTestCombinatoric : public ::testing::Test {};
TYPED_TEST_SUITE_P(AnyInvTestCombinatoric);
TYPED_TEST_P(AnyInvTestCombinatoric, MoveAssignEmptyEmptyLhsRhs) {
using AnyInvType = typename TypeParam::AnyInvType;
AnyInvType source_fun;
AnyInvType fun;
fun = std::move(source_fun);
EXPECT_FALSE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestCombinatoric, MoveAssignEmptyLhsNonemptyRhs) {
using AnyInvType = typename TypeParam::AnyInvType;
using AddType = typename TypeParam::AddType;
AnyInvType source_fun(absl::in_place_type<AddType>, 5);
AnyInvType fun;
fun = std::move(source_fun);
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
}
TYPED_TEST_P(AnyInvTestCombinatoric, MoveAssignNonemptyEmptyLhsRhs) {
using AnyInvType = typename TypeParam::AnyInvType;
using AddType = typename TypeParam::AddType;
AnyInvType source_fun;
AnyInvType fun(absl::in_place_type<AddType>, 5);
fun = std::move(source_fun);
EXPECT_FALSE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestCombinatoric, MoveAssignNonemptyLhsNonemptyRhs) {
using AnyInvType = typename TypeParam::AnyInvType;
using AddType = typename TypeParam::AddType;
AnyInvType source_fun(absl::in_place_type<AddType>, 5);
AnyInvType fun(absl::in_place_type<AddType>, 20);
fun = std::move(source_fun);
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
}
TYPED_TEST_P(AnyInvTestCombinatoric, SelfMoveAssignEmpty) {
using AnyInvType = typename TypeParam::AnyInvType;
AnyInvType source_fun;
source_fun = std::move(source_fun);
}
TYPED_TEST_P(AnyInvTestCombinatoric, SelfMoveAssignNonempty) {
using AnyInvType = typename TypeParam::AnyInvType;
using AddType = typename TypeParam::AddType;
AnyInvType source_fun(absl::in_place_type<AddType>, 5);
source_fun = std::move(source_fun);
}
TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullptrEmptyLhs) {
using AnyInvType = typename TypeParam::AnyInvType;
AnyInvType fun;
fun = nullptr;
EXPECT_FALSE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullFunctionPtrEmptyLhs) {
using AnyInvType = typename TypeParam::AnyInvType;
using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType;
UnqualifiedFunType* const null_fun_ptr = nullptr;
AnyInvType fun;
fun = null_fun_ptr;
EXPECT_FALSE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullMemberFunctionPtrEmptyLhs) {
using AnyInvType = typename TypeParam::AnyInvType;
using MemFunPtrType = typename TypeParam::MemFunPtrType;
const MemFunPtrType null_mem_fun_ptr = nullptr;
AnyInvType fun;
fun = null_mem_fun_ptr;
EXPECT_FALSE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullMemberObjectPtrEmptyLhs) {
using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
using MemObjPtrType = typename TypeParam::MemObjPtrType;
const MemObjPtrType null_mem_obj_ptr = nullptr;
UnaryAnyInvType fun;
fun = null_mem_obj_ptr;
EXPECT_FALSE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestCombinatoric, AssignMemberFunctionPtrEmptyLhs) {
using AnyInvType = typename TypeParam::AnyInvType;
AnyInvType fun;
fun = &Int::MemberFunctionAdd;
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
}
TYPED_TEST_P(AnyInvTestCombinatoric, AssignMemberObjectPtrEmptyLhs) {
using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
UnaryAnyInvType fun;
fun = &Int::value;
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(13, TypeParam::ToUnaryThisParam(fun)(13));
}
TYPED_TEST_P(AnyInvTestCombinatoric, AssignFunctionReferenceDecayEmptyLhs) {
using AnyInvType = typename TypeParam::AnyInvType;
AnyInvType fun;
fun = add_function;
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
}
TYPED_TEST_P(AnyInvTestCombinatoric,
AssignCompatibleAnyInvocableEmptyLhsEmptyRhs) {
using AnyInvType = typename TypeParam::AnyInvType;
using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType;
CompatibleAnyInvType other;
AnyInvType fun;
fun = std::move(other);
EXPECT_FALSE(static_cast<bool>(other));
EXPECT_EQ(other, nullptr);
EXPECT_EQ(nullptr, other);
EXPECT_FALSE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestCombinatoric,
AssignCompatibleAnyInvocableEmptyLhsNonemptyRhs) {
using AnyInvType = typename TypeParam::AnyInvType;
using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType;
CompatibleAnyInvType other = &add_function;
AnyInvType fun;
fun = std::move(other);
EXPECT_FALSE(static_cast<bool>(other));
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
}
TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullptrNonemptyLhs) {
using AnyInvType = typename TypeParam::AnyInvType;
AnyInvType fun = &mult_function;
fun = nullptr;
EXPECT_FALSE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullFunctionPtrNonemptyLhs) {
using AnyInvType = typename TypeParam::AnyInvType;
using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType;
UnqualifiedFunType* const null_fun_ptr = nullptr;
AnyInvType fun = &mult_function;
fun = null_fun_ptr;
EXPECT_FALSE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullMemberFunctionPtrNonemptyLhs) {
using AnyInvType = typename TypeParam::AnyInvType;
using MemFunPtrType = typename TypeParam::MemFunPtrType;
const MemFunPtrType null_mem_fun_ptr = nullptr;
AnyInvType fun = &mult_function;
fun = null_mem_fun_ptr;
EXPECT_FALSE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullMemberObjectPtrNonemptyLhs) {
using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
using MemObjPtrType = typename TypeParam::MemObjPtrType;
const MemObjPtrType null_mem_obj_ptr = nullptr;
UnaryAnyInvType fun = &square_function;
fun = null_mem_obj_ptr;
EXPECT_FALSE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestCombinatoric, AssignMemberFunctionPtrNonemptyLhs) {
using AnyInvType = typename TypeParam::AnyInvType;
AnyInvType fun = &mult_function;
fun = &Int::MemberFunctionAdd;
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
}
TYPED_TEST_P(AnyInvTestCombinatoric, AssignMemberObjectPtrNonemptyLhs) {
using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
UnaryAnyInvType fun = &square_function;
fun = &Int::value;
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(13, TypeParam::ToUnaryThisParam(fun)(13));
}
TYPED_TEST_P(AnyInvTestCombinatoric, AssignFunctionReferenceDecayNonemptyLhs) {
using AnyInvType = typename TypeParam::AnyInvType;
AnyInvType fun = &mult_function;
fun = add_function;
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
}
TYPED_TEST_P(AnyInvTestCombinatoric,
AssignCompatibleAnyInvocableNonemptyLhsEmptyRhs) {
using AnyInvType = typename TypeParam::AnyInvType;
using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType;
CompatibleAnyInvType other;
AnyInvType fun = &mult_function;
fun = std::move(other);
EXPECT_FALSE(static_cast<bool>(other));
EXPECT_EQ(other, nullptr);
EXPECT_EQ(nullptr, other);
EXPECT_FALSE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestCombinatoric,
AssignCompatibleAnyInvocableNonemptyLhsNonemptyRhs) {
using AnyInvType = typename TypeParam::AnyInvType;
using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType;
CompatibleAnyInvType other = &add_function;
AnyInvType fun = &mult_function;
fun = std::move(other);
EXPECT_FALSE(static_cast<bool>(other));
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
}
TYPED_TEST_P(AnyInvTestCombinatoric, SwapEmptyLhsEmptyRhs) {
using AnyInvType = typename TypeParam::AnyInvType;
{
AnyInvType fun;
AnyInvType other;
using std::swap;
swap(fun, other);
EXPECT_FALSE(static_cast<bool>(fun));
EXPECT_FALSE(static_cast<bool>(other));
EXPECT_TRUE(
absl::type_traits_internal::IsNothrowSwappable<AnyInvType>::value);
}
{
AnyInvType fun;
AnyInvType other;
fun.swap(other);
EXPECT_FALSE(static_cast<bool>(fun));
EXPECT_FALSE(static_cast<bool>(other));
EXPECT_TRUE(IsNothrowMemberSwappable<AnyInvType>::value);
}
}
TYPED_TEST_P(AnyInvTestCombinatoric, SwapEmptyLhsNonemptyRhs) {
using AnyInvType = typename TypeParam::AnyInvType;
using AddType = typename TypeParam::AddType;
{
AnyInvType fun;
AnyInvType other(absl::in_place_type<AddType>, 5);
using std::swap;
swap(fun, other);
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_FALSE(static_cast<bool>(other));
EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
EXPECT_TRUE(
absl::type_traits_internal::IsNothrowSwappable<AnyInvType>::value);
}
{
AnyInvType fun;
AnyInvType other(absl::in_place_type<AddType>, 5);
fun.swap(other);
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_FALSE(static_cast<bool>(other));
EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
EXPECT_TRUE(IsNothrowMemberSwappable<AnyInvType>::value);
}
}
TYPED_TEST_P(AnyInvTestCombinatoric, SwapNonemptyLhsEmptyRhs) {
using AnyInvType = typename TypeParam::AnyInvType;
using AddType = typename TypeParam::AddType;
{
AnyInvType fun(absl::in_place_type<AddType>, 5);
AnyInvType other;
using std::swap;
swap(fun, other);
EXPECT_FALSE(static_cast<bool>(fun));
EXPECT_TRUE(static_cast<bool>(other));
EXPECT_EQ(29, TypeParam::ToThisParam(other)(7, 8, 9).value);
EXPECT_TRUE(
absl::type_traits_internal::IsNothrowSwappable<AnyInvType>::value);
}
{
AnyInvType fun(absl::in_place_type<AddType>, 5);
AnyInvType other;
fun.swap(other);
EXPECT_FALSE(static_cast<bool>(fun));
EXPECT_TRUE(static_cast<bool>(other));
EXPECT_EQ(29, TypeParam::ToThisParam(other)(7, 8, 9).value);
EXPECT_TRUE(IsNothrowMemberSwappable<AnyInvType>::value);
}
}
TYPED_TEST_P(AnyInvTestCombinatoric, SwapNonemptyLhsNonemptyRhs) {
using AnyInvType = typename TypeParam::AnyInvType;
using AddType = typename TypeParam::AddType;
{
AnyInvType fun(absl::in_place_type<AddType>, 5);
AnyInvType other(absl::in_place_type<AddType>, 6);
using std::swap;
swap(fun, other);
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_TRUE(static_cast<bool>(other));
EXPECT_EQ(30, TypeParam::ToThisParam(fun)(7, 8, 9).value);
EXPECT_EQ(29, TypeParam::ToThisParam(other)(7, 8, 9).value);
EXPECT_TRUE(
absl::type_traits_internal::IsNothrowSwappable<AnyInvType>::value);
}
{
AnyInvType fun(absl::in_place_type<AddType>, 5);
AnyInvType other(absl::in_place_type<AddType>, 6);
fun.swap(other);
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_TRUE(static_cast<bool>(other));
EXPECT_EQ(30, TypeParam::ToThisParam(fun)(7, 8, 9).value);
EXPECT_EQ(29, TypeParam::ToThisParam(other)(7, 8, 9).value);
EXPECT_TRUE(IsNothrowMemberSwappable<AnyInvType>::value);
}
}
template <class T>
class AnyInvTestMovable : public ::testing::Test {};
TYPED_TEST_SUITE_P(AnyInvTestMovable);
TYPED_TEST_P(AnyInvTestMovable, ConversionConstructionUserDefinedType) {
using AnyInvType = typename TypeParam::AnyInvType;
using AddType = typename TypeParam::AddType;
AnyInvType fun(AddType(5));
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
}
TYPED_TEST_P(AnyInvTestMovable, ConversionConstructionVoidCovariance) {
using VoidAnyInvType = typename TypeParam::VoidAnyInvType;
using AddType = typename TypeParam::AddType;
VoidAnyInvType fun(AddType(5));
EXPECT_TRUE(static_cast<bool>(fun));
}
TYPED_TEST_P(AnyInvTestMovable, ConversionAssignUserDefinedTypeEmptyLhs) {
using AnyInvType = typename TypeParam::AnyInvType;
using AddType = typename TypeParam::AddType;
AnyInvType fun;
fun = AddType(5);
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
}
TYPED_TEST_P(AnyInvTestMovable, ConversionAssignUserDefinedTypeNonemptyLhs) {
using AnyInvType = typename TypeParam::AnyInvType;
using AddType = typename TypeParam::AddType;
AnyInvType fun = &add_function;
fun = AddType(5);
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
}
TYPED_TEST_P(AnyInvTestMovable, ConversionAssignVoidCovariance) {
using VoidAnyInvType = typename TypeParam::VoidAnyInvType;
using AddType = typename TypeParam::AddType;
VoidAnyInvType fun;
fun = AddType(5);
EXPECT_TRUE(static_cast<bool>(fun));
}
template <class T>
class AnyInvTestNoexceptFalse : public ::testing::Test {};
TYPED_TEST_SUITE_P(AnyInvTestNoexceptFalse);
TYPED_TEST_P(AnyInvTestNoexceptFalse, ConversionConstructionConstraints) {
using AnyInvType = typename TypeParam::AnyInvType;
EXPECT_TRUE((std::is_constructible<
AnyInvType,
typename TypeParam::AnyInvocableFunTypeNotNoexcept*>::value));
EXPECT_FALSE((
std::is_constructible<AnyInvType,
typename TypeParam::IncompatibleInvocable>::value));
}
TYPED_TEST_P(AnyInvTestNoexceptFalse, ConversionAssignConstraints) {
using AnyInvType = typename TypeParam::AnyInvType;
EXPECT_TRUE((std::is_assignable<
AnyInvType&,
typename TypeParam::AnyInvocableFunTypeNotNoexcept*>::value));
EXPECT_FALSE(
(std::is_assignable<AnyInvType&,
typename TypeParam::IncompatibleInvocable>::value));
}
template <class T>
class AnyInvTestNoexceptTrue : public ::testing::Test {};
TYPED_TEST_SUITE_P(AnyInvTestNoexceptTrue);
TYPED_TEST_P(AnyInvTestNoexceptTrue, ConversionConstructionConstraints) {
#if ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L
GTEST_SKIP() << "Noexcept was not part of the type system before C++17.";
#else
using AnyInvType = typename TypeParam::AnyInvType;
EXPECT_FALSE((std::is_constructible<
AnyInvType,
typename TypeParam::AnyInvocableFunTypeNotNoexcept*>::value));
EXPECT_FALSE((
std::is_constructible<AnyInvType,
typename TypeParam::IncompatibleInvocable>::value));
#endif
}
TYPED_TEST_P(AnyInvTestNoexceptTrue, ConversionAssignConstraints) {
#if ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L
GTEST_SKIP() << "Noexcept was not part of the type system before C++17.";
#else
using AnyInvType = typename TypeParam::AnyInvType;
EXPECT_FALSE((std::is_assignable<
AnyInvType&,
typename TypeParam::AnyInvocableFunTypeNotNoexcept*>::value));
EXPECT_FALSE(
(std::is_assignable<AnyInvType&,
typename TypeParam::IncompatibleInvocable>::value));
#endif
}
template <class T>
class AnyInvTestNonRvalue : public ::testing::Test {};
TYPED_TEST_SUITE_P(AnyInvTestNonRvalue);
TYPED_TEST_P(AnyInvTestNonRvalue, ConversionConstructionReferenceWrapper) {
using AnyInvType = typename TypeParam::AnyInvType;
using AddType = typename TypeParam::AddType;
AddType add(4);
AnyInvType fun = std::ref(add);
add.state = 5;
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(38, TypeParam::ToThisParam(fun)(10, 11, 12).value);
}
TYPED_TEST_P(AnyInvTestNonRvalue, NonMoveableResultType) {
#if ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L
GTEST_SKIP() << "Copy/move elision was not standard before C++17";
#else
struct Result {
int x;
explicit Result(const int x_in) : x(x_in) {}
Result(Result&&) = delete;
};
static_assert(!std::is_move_constructible<Result>::value, "");
static_assert(!std::is_copy_constructible<Result>::value, "");
const auto return_17 = []() noexcept { return Result(17); };
EXPECT_EQ(17, return_17().x);
using UnqualifiedFun =
absl::conditional_t<TypeParam::kIsNoexcept, Result() noexcept, Result()>;
using Fun =
GiveQualifiersToFun<typename TypeParam::Qualifiers, UnqualifiedFun>;
AnyInvocable<Fun> any_inv(return_17);
EXPECT_EQ(17, any_inv().x);
#endif
}
TYPED_TEST_P(AnyInvTestNonRvalue, ConversionAssignReferenceWrapperEmptyLhs) {
using AnyInvType = typename TypeParam::AnyInvType;
using AddType = typename TypeParam::AddType;
AddType add(4);
AnyInvType fun;
fun = std::ref(add);
add.state = 5;
EXPECT_TRUE(
(std::is_nothrow_assignable<AnyInvType&,
std::reference_wrapper<AddType>>::value));
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(38, TypeParam::ToThisParam(fun)(10, 11, 12).value);
}
TYPED_TEST_P(AnyInvTestNonRvalue, ConversionAssignReferenceWrapperNonemptyLhs) {
using AnyInvType = typename TypeParam::AnyInvType;
using AddType = typename TypeParam::AddType;
AddType add(4);
AnyInvType fun = &mult_function;
fun = std::ref(add);
add.state = 5;
EXPECT_TRUE(
(std::is_nothrow_assignable<AnyInvType&,
std::reference_wrapper<AddType>>::value));
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
EXPECT_TRUE(static_cast<bool>(fun));
EXPECT_EQ(38, TypeParam::ToThisParam(fun)(10, 11, 12).value);
}
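// Suite for &&-qualified signatures: reference_wrapper conversions are
// rejected, and invoking an already-consumed rvalue-qualified AnyInvocable a
// second time is a debug-checked error.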
template <class T>
class AnyInvTestRvalue : public ::testing::Test {};
TYPED_TEST_SUITE_P(AnyInvTestRvalue);
TYPED_TEST_P(AnyInvTestRvalue, ConversionConstructionReferenceWrapper) {
using AnyInvType = typename TypeParam::AnyInvType;
using AddType = typename TypeParam::AddType;
EXPECT_FALSE((
std::is_convertible<std::reference_wrapper<AddType>, AnyInvType>::value));
}
TYPED_TEST_P(AnyInvTestRvalue, NonMoveableResultType) {
#if ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L
GTEST_SKIP() << "Copy/move elision was not standard before C++17";
#else
struct Result {
int x;
explicit Result(const int x_in) : x(x_in) {}
Result(Result&&) = delete;
};
static_assert(!std::is_move_constructible<Result>::value, "");
static_assert(!std::is_copy_constructible<Result>::value, "");
const auto return_17 = []() noexcept { return Result(17); };
EXPECT_EQ(17, return_17().x);
using UnqualifiedFun =
absl::conditional_t<TypeParam::kIsNoexcept, Result() noexcept, Result()>;
using Fun =
GiveQualifiersToFun<typename TypeParam::Qualifiers, UnqualifiedFun>;
EXPECT_EQ(17, AnyInvocable<Fun>(return_17)().x);
#endif
}
TYPED_TEST_P(AnyInvTestRvalue, ConversionAssignReferenceWrapper) {
using AnyInvType = typename TypeParam::AnyInvType;
using AddType = typename TypeParam::AddType;
EXPECT_FALSE((
std::is_assignable<AnyInvType&, std::reference_wrapper<AddType>>::value));
}
TYPED_TEST_P(AnyInvTestRvalue, NonConstCrashesOnSecondCall) {
using AnyInvType = typename TypeParam::AnyInvType;
using AddType = typename TypeParam::AddType;
AnyInvType fun(absl::in_place_type<AddType>, 5);
EXPECT_TRUE(static_cast<bool>(fun));
std::move(fun)(7, 8, 9);
EXPECT_TRUE(static_cast<bool>(fun));
#if !defined(NDEBUG)
EXPECT_DEATH_IF_SUPPORTED(std::move(fun)(7, 8, 9), "");
#endif
}
TYPED_TEST_P(AnyInvTestRvalue, QualifierIndependentObjectLifetime) {
using AnyInvType = typename TypeParam::AnyInvType;
auto refs = std::make_shared<std::nullptr_t>();
{
AnyInvType fun([refs](auto&&...) noexcept { return 0; });
EXPECT_GT(refs.use_count(), 1);
std::move(fun)(7, 8, 9);
EXPECT_GT(refs.use_count(), 1);
}
EXPECT_EQ(refs.use_count(), 1);
}
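// Parameter lists covering the combinatorial test space: qualifier sets,
// nothrow vs. potentially-throwing call signatures, local vs. remote (heap)
// storage, and movable vs. non-movable wrapped objects.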
template <Movable Mov, Destructible Dest, NothrowCall CallExceptionSpec,
ObjSize Size, ObjAlign Align>
using NonRvalueQualifiedTestParams = ::testing::Types<
TestParams<Mov, Dest, _, CallExceptionSpec, Size, Align>,
TestParams<Mov, Dest, const _, CallExceptionSpec, Size, Align>,
TestParams<Mov, Dest, _&, CallExceptionSpec, Size, Align>,
TestParams<Mov, Dest, const _&, CallExceptionSpec, Size, Align>>;
template <Movable Mov, Destructible Dest, NothrowCall CallExceptionSpec,
ObjSize Size, ObjAlign Align>
using RvalueQualifiedTestParams = ::testing::Types<
TestParams<Mov, Dest, _&&, CallExceptionSpec, Size, Align>,
TestParams<Mov, Dest, const _&&, CallExceptionSpec, Size, Align>
>;
using TestParameterListNonRvalueQualifiersNothrowCall =
NonRvalueQualifiedTestParams<Movable::trivial, Destructible::trivial,
NothrowCall::yes, ObjSize::small,
ObjAlign::normal>;
using TestParameterListRvalueQualifiersNothrowCall =
RvalueQualifiedTestParams<Movable::trivial, Destructible::trivial,
NothrowCall::yes, ObjSize::small,
ObjAlign::normal>;
using TestParameterListNonRvalueQualifiersCallMayThrow =
NonRvalueQualifiedTestParams<Movable::trivial, Destructible::trivial,
NothrowCall::no, ObjSize::small,
ObjAlign::normal>;
using TestParameterListRvalueQualifiersCallMayThrow =
RvalueQualifiedTestParams<Movable::trivial, Destructible::trivial,
NothrowCall::no, ObjSize::small,
ObjAlign::normal>;
using TestParameterListRemoteMovable = ::testing::Types<
TestParams<Movable::trivial, Destructible::trivial, _, NothrowCall::no,
ObjSize::large, ObjAlign::normal>,
TestParams<Movable::nothrow, Destructible::trivial, _, NothrowCall::no,
ObjSize::large, ObjAlign::normal>,
TestParams<Movable::yes, Destructible::trivial, _, NothrowCall::no,
ObjSize::small, ObjAlign::normal>,
TestParams<Movable::yes, Destructible::trivial, _, NothrowCall::no,
ObjSize::large, ObjAlign::normal>,
TestParams<Movable::trivial, Destructible::nothrow, _, NothrowCall::no,
ObjSize::large, ObjAlign::normal>,
TestParams<Movable::nothrow, Destructible::nothrow, _, NothrowCall::no,
ObjSize::large, ObjAlign::normal>,
TestParams<Movable::yes, Destructible::nothrow, _, NothrowCall::no,
ObjSize::small, ObjAlign::normal>,
TestParams<Movable::yes, Destructible::nothrow, _, NothrowCall::no,
ObjSize::large, ObjAlign::normal>
#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
,
TestParams<Movable::trivial, Destructible::trivial, _, NothrowCall::no,
ObjSize::small, ObjAlign::large>,
TestParams<Movable::nothrow, Destructible::trivial, _, NothrowCall::no,
ObjSize::small, ObjAlign::large>,
TestParams<Movable::trivial, Destructible::nothrow, _, NothrowCall::no,
ObjSize::small, ObjAlign::large>,
TestParams<Movable::nothrow, Destructible::nothrow, _, NothrowCall::no,
ObjSize::small, ObjAlign::large>
#endif
>;
using TestParameterListRemoteNonMovable = ::testing::Types<
TestParams<Movable::no, Destructible::trivial, _, NothrowCall::no,
ObjSize::small, ObjAlign::normal>,
TestParams<Movable::no, Destructible::trivial, _, NothrowCall::no,
ObjSize::large, ObjAlign::normal>,
TestParams<Movable::no, Destructible::nothrow, _, NothrowCall::no,
ObjSize::small, ObjAlign::normal>,
TestParams<Movable::no, Destructible::nothrow, _, NothrowCall::no,
ObjSize::large, ObjAlign::normal>
>;
using TestParameterListLocal = ::testing::Types<
TestParams<Movable::trivial, Destructible::trivial, _, NothrowCall::no,
ObjSize::small, ObjAlign::normal>,
TestParams<Movable::nothrow, Destructible::trivial, _, NothrowCall::no,
ObjSize::small, ObjAlign::normal>,
TestParams<Movable::trivial, Destructible::trivial, _, NothrowCall::no,
ObjSize::small, ObjAlign::normal>,
TestParams<Movable::nothrow, Destructible::trivial, _, NothrowCall::no,
ObjSize::small, ObjAlign::normal>
>;
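// Register each typed suite, then instantiate it over every parameter list
// whose properties it exercises.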
REGISTER_TYPED_TEST_SUITE_P(
AnyInvTestBasic, DefaultConstruction, ConstructionNullptr,
ConstructionNullFunctionPtr, ConstructionNullMemberFunctionPtr,
ConstructionNullMemberObjectPtr, ConstructionMemberFunctionPtr,
ConstructionMemberObjectPtr, ConstructionFunctionReferenceDecay,
ConstructionCompatibleAnyInvocableEmpty,
ConstructionCompatibleAnyInvocableNonempty, InPlaceConstruction,
ConversionToBool, Invocation, InPlaceConstructionInitializerList,
InPlaceNullFunPtrConstruction, InPlaceNullFunPtrConstructionValueInit,
InPlaceNullMemFunPtrConstruction, InPlaceNullMemFunPtrConstructionValueInit,
InPlaceNullMemObjPtrConstruction, InPlaceNullMemObjPtrConstructionValueInit,
InPlaceVoidCovarianceConstruction, MoveConstructionFromEmpty,
MoveConstructionFromNonEmpty, ComparisonWithNullptrEmpty,
ComparisonWithNullptrNonempty, ResultType);
INSTANTIATE_TYPED_TEST_SUITE_P(
NonRvalueCallMayThrow, AnyInvTestBasic,
TestParameterListNonRvalueQualifiersCallMayThrow);
INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestBasic,
TestParameterListRvalueQualifiersCallMayThrow);
INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestBasic,
TestParameterListRemoteMovable);
INSTANTIATE_TYPED_TEST_SUITE_P(RemoteNonMovable, AnyInvTestBasic,
TestParameterListRemoteNonMovable);
INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestBasic, TestParameterListLocal);
INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestBasic,
TestParameterListNonRvalueQualifiersNothrowCall);
INSTANTIATE_TYPED_TEST_SUITE_P(CallNothrowRvalue, AnyInvTestBasic,
TestParameterListRvalueQualifiersNothrowCall);
REGISTER_TYPED_TEST_SUITE_P(
AnyInvTestCombinatoric, MoveAssignEmptyEmptyLhsRhs,
MoveAssignEmptyLhsNonemptyRhs, MoveAssignNonemptyEmptyLhsRhs,
MoveAssignNonemptyLhsNonemptyRhs, SelfMoveAssignEmpty,
SelfMoveAssignNonempty, AssignNullptrEmptyLhs,
AssignNullFunctionPtrEmptyLhs, AssignNullMemberFunctionPtrEmptyLhs,
AssignNullMemberObjectPtrEmptyLhs, AssignMemberFunctionPtrEmptyLhs,
AssignMemberObjectPtrEmptyLhs, AssignFunctionReferenceDecayEmptyLhs,
AssignCompatibleAnyInvocableEmptyLhsEmptyRhs,
AssignCompatibleAnyInvocableEmptyLhsNonemptyRhs, AssignNullptrNonemptyLhs,
AssignNullFunctionPtrNonemptyLhs, AssignNullMemberFunctionPtrNonemptyLhs,
AssignNullMemberObjectPtrNonemptyLhs, AssignMemberFunctionPtrNonemptyLhs,
AssignMemberObjectPtrNonemptyLhs, AssignFunctionReferenceDecayNonemptyLhs,
AssignCompatibleAnyInvocableNonemptyLhsEmptyRhs,
AssignCompatibleAnyInvocableNonemptyLhsNonemptyRhs, SwapEmptyLhsEmptyRhs,
SwapEmptyLhsNonemptyRhs, SwapNonemptyLhsEmptyRhs,
SwapNonemptyLhsNonemptyRhs);
INSTANTIATE_TYPED_TEST_SUITE_P(
NonRvalueCallMayThrow, AnyInvTestCombinatoric,
TestParameterListNonRvalueQualifiersCallMayThrow);
INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestCombinatoric,
TestParameterListRvalueQualifiersCallMayThrow);
INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestCombinatoric,
TestParameterListRemoteMovable);
INSTANTIATE_TYPED_TEST_SUITE_P(RemoteNonMovable, AnyInvTestCombinatoric,
TestParameterListRemoteNonMovable);
INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestCombinatoric,
TestParameterListLocal);
INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestCombinatoric,
TestParameterListNonRvalueQualifiersNothrowCall);
INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallNothrow, AnyInvTestCombinatoric,
TestParameterListRvalueQualifiersNothrowCall);
REGISTER_TYPED_TEST_SUITE_P(AnyInvTestMovable,
ConversionConstructionUserDefinedType,
ConversionConstructionVoidCovariance,
ConversionAssignUserDefinedTypeEmptyLhs,
ConversionAssignUserDefinedTypeNonemptyLhs,
ConversionAssignVoidCovariance);
INSTANTIATE_TYPED_TEST_SUITE_P(
NonRvalueCallMayThrow, AnyInvTestMovable,
TestParameterListNonRvalueQualifiersCallMayThrow);
INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestMovable,
TestParameterListRvalueQualifiersCallMayThrow);
INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestMovable,
TestParameterListRemoteMovable);
INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestMovable,
TestParameterListLocal);
INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestMovable,
TestParameterListNonRvalueQualifiersNothrowCall);
INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallNothrow, AnyInvTestMovable,
TestParameterListRvalueQualifiersNothrowCall);
REGISTER_TYPED_TEST_SUITE_P(AnyInvTestNoexceptFalse,
ConversionConstructionConstraints,
ConversionAssignConstraints);
INSTANTIATE_TYPED_TEST_SUITE_P(
NonRvalueCallMayThrow, AnyInvTestNoexceptFalse,
TestParameterListNonRvalueQualifiersCallMayThrow);
INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestNoexceptFalse,
TestParameterListRvalueQualifiersCallMayThrow);
INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestNoexceptFalse,
TestParameterListRemoteMovable);
INSTANTIATE_TYPED_TEST_SUITE_P(RemoteNonMovable, AnyInvTestNoexceptFalse,
TestParameterListRemoteNonMovable);
INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestNoexceptFalse,
TestParameterListLocal);
REGISTER_TYPED_TEST_SUITE_P(AnyInvTestNoexceptTrue,
ConversionConstructionConstraints,
ConversionAssignConstraints);
INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestNoexceptTrue,
TestParameterListNonRvalueQualifiersNothrowCall);
INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallNothrow, AnyInvTestNoexceptTrue,
TestParameterListRvalueQualifiersNothrowCall);
REGISTER_TYPED_TEST_SUITE_P(AnyInvTestNonRvalue,
ConversionConstructionReferenceWrapper,
NonMoveableResultType,
ConversionAssignReferenceWrapperEmptyLhs,
ConversionAssignReferenceWrapperNonemptyLhs);
INSTANTIATE_TYPED_TEST_SUITE_P(
NonRvalueCallMayThrow, AnyInvTestNonRvalue,
TestParameterListNonRvalueQualifiersCallMayThrow);
INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestNonRvalue,
TestParameterListRemoteMovable);
INSTANTIATE_TYPED_TEST_SUITE_P(RemoteNonMovable, AnyInvTestNonRvalue,
TestParameterListRemoteNonMovable);
INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestNonRvalue,
TestParameterListLocal);
INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestNonRvalue,
TestParameterListNonRvalueQualifiersNothrowCall);
REGISTER_TYPED_TEST_SUITE_P(AnyInvTestRvalue,
ConversionConstructionReferenceWrapper,
NonMoveableResultType,
ConversionAssignReferenceWrapper,
NonConstCrashesOnSecondCall,
QualifierIndependentObjectLifetime);
INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestRvalue,
TestParameterListRvalueQualifiersCallMayThrow);
INSTANTIATE_TYPED_TEST_SUITE_P(CallNothrowRvalue, AnyInvTestRvalue,
TestParameterListRvalueQualifiersNothrowCall);
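// A function pointer converts to an rvalue-qualified AnyInvocable, but an
// arbitrary object pointer such as void* must not.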
static_assert(
std::is_convertible<void (*)(), absl::AnyInvocable<void() &&>>::value, "");
static_assert(!std::is_convertible<void*, absl::AnyInvocable<void() &&>>::value,
"");
#undef ABSL_INTERNAL_NOEXCEPT_SPEC
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/functional/internal/any_invocable.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/functional/any_invocable_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
812071a9-cc21-46f8-accc-2f96d4671015 | cpp | google/tensorstore | small_bit_set | tensorstore/util/small_bit_set.h | tensorstore/util/small_bit_set_test.cc | #ifndef TENSORSTORE_UTIL_SMALL_BIT_SET_H_
#define TENSORSTORE_UTIL_SMALL_BIT_SET_H_
#include <stddef.h>
#include <cassert>
#include <iterator>
#include <ostream>
#include <type_traits>
#include "absl/base/attributes.h"
#include "absl/numeric/bits.h"
#include "tensorstore/internal/integer_types.h"
namespace tensorstore {
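// BitRef is a proxy reference to a single bit within an unsigned storage
// block, analogous to std::vector<bool>::reference: reads test the bit under
// `mask_`, and writes set or clear it in place.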
template <typename T>
class BitRef {
static_assert(std::is_unsigned_v<T>, "Storage type T must be unsigned.");
public:
friend class BitRef<const T>;
using block_type = T;
using value_type = bool;
using element_type = bool;
constexpr static ptrdiff_t kBitsPerBlock = sizeof(T) * 8;
constexpr BitRef(T* block ABSL_ATTRIBUTE_LIFETIME_BOUND, ptrdiff_t offset)
: block_(block), mask_(static_cast<T>(1) << (offset % kBitsPerBlock)) {
assert(offset >= 0);
}
constexpr operator bool() const { return *block_ & mask_; }
const BitRef& operator=(bool value) const {
*block_ = value ? (*block_ | mask_) : (*block_ & ~mask_);
return *this;
}
const BitRef& operator=(BitRef value) const {
return (*this = static_cast<bool>(value));
}
friend void swap(BitRef a, bool& x) {
bool temp = a;
a = x;
x = temp;
}
friend void swap(bool& x, BitRef a) {
bool temp = a;
a = x;
x = temp;
}
private:
T* block_;
T mask_;
};
template <typename T, typename U>
std::enable_if_t<(!std::is_const_v<T> && !std::is_const_v<U>)> swap(
BitRef<T> a, BitRef<U> b) {
bool temp = a;
a = b;
b = temp;
}
template <typename T>
std::enable_if_t<(!std::is_const_v<T>)> swap(BitRef<T> a, BitRef<T> b) {
bool temp = a;
a = b;
b = temp;
}
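// BitIterator is a random-access iterator over bits packed into an array of
// unsigned blocks; dereferencing yields a BitRef proxy rather than a bool&.
// A minimal sketch (illustrative only):
//
//   uint16_t blocks[2] = {0, 0};
//   BitIterator<uint16_t> it(blocks, 19);
//   *it = true;  // sets bit 3 of blocks[1] (19 % 16 == 3)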
template <typename T>
class BitIterator {
static_assert(std::is_unsigned_v<T>, "Storage type T must be unsigned.");
public:
using pointer = BitIterator<T>;
using const_pointer = BitIterator<const T>;
using reference = BitRef<T>;
using const_reference = BitRef<const T>;
using difference_type = ptrdiff_t;
using value_type = bool;
using iterator_category = std::random_access_iterator_tag;
constexpr static ptrdiff_t kBitsPerBlock = sizeof(T) * 8;
constexpr BitIterator() : base_(nullptr), offset_(0) {}
constexpr BitIterator(T* base ABSL_ATTRIBUTE_LIFETIME_BOUND, ptrdiff_t offset)
: base_(base), offset_(offset) {}
template <typename U, std::enable_if_t<std::is_same_v<const U, T>>* = nullptr>
constexpr BitIterator(BitIterator<U> other)
: base_(other.base()), offset_(other.offset()) {}
constexpr T* base() const { return base_; }
constexpr ptrdiff_t offset() const { return offset_; }
constexpr BitRef<T> operator*() const {
return BitRef<T>(base() + offset() / kBitsPerBlock, offset());
}
constexpr BitRef<T> operator[](ptrdiff_t offset) const {
return *(*this + offset);
}
BitIterator& operator++() {
++offset_;
return *this;
}
BitIterator& operator--() {
--offset_;
return *this;
}
BitIterator operator++(int) {
BitIterator temp = *this;
++offset_;
return temp;
}
BitIterator operator--(int) {
BitIterator temp = *this;
--offset_;
return temp;
}
friend BitIterator operator+(BitIterator it, ptrdiff_t offset) {
it += offset;
return it;
}
friend BitIterator operator+(ptrdiff_t offset, BitIterator it) {
it += offset;
return it;
}
BitIterator& operator+=(ptrdiff_t x) {
offset_ += x;
return *this;
}
friend BitIterator operator-(BitIterator it, ptrdiff_t offset) {
it -= offset;
return it;
}
BitIterator& operator-=(ptrdiff_t x) {
offset_ -= x;
return *this;
}
friend constexpr ptrdiff_t operator-(BitIterator a, BitIterator b) {
assert(a.base() == b.base());
return a.offset() - b.offset();
}
friend constexpr bool operator==(BitIterator a, BitIterator b) {
assert(a.base() == b.base());
return a.offset() == b.offset();
}
friend constexpr bool operator!=(BitIterator a, BitIterator b) {
assert(a.base() == b.base());
return a.offset() != b.offset();
}
friend constexpr bool operator<(BitIterator a, BitIterator b) {
assert(a.base() == b.base());
return a.offset() < b.offset();
}
friend constexpr bool operator<=(BitIterator a, BitIterator b) {
assert(a.base() == b.base());
return a.offset() <= b.offset();
}
friend constexpr bool operator>(BitIterator a, BitIterator b) {
assert(a.base() == b.base());
return a.offset() > b.offset();
}
friend constexpr bool operator>=(BitIterator a, BitIterator b) {
assert(a.base() == b.base());
return a.offset() >= b.offset();
}
private:
T* base_;
ptrdiff_t offset_;
};
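// Implementation helpers: BoolsView iterates all N bits as bools, while
// IndexView/OneBitsIterator visit only the indices of set bits by repeatedly
// clearing the lowest one bit (value_ & -value_).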
namespace bitset_impl {
template <typename Iterator, size_t N>
class BoolsView {
public:
using iterator = Iterator;
using value_type = typename iterator::value_type;
using difference_type = typename iterator::difference_type;
using reference = typename iterator::reference;
explicit BoolsView(iterator it) : it_(std::move(it)) {}
constexpr iterator begin() const { return it_; }
constexpr iterator end() const { return iterator(it_.base(), N); }
private:
iterator it_;
};
template <typename Uint>
class OneBitsIterator {
public:
using value_type = int;
using difference_type = int;
using reference = int;
OneBitsIterator() : value_(0) {}
explicit OneBitsIterator(Uint value) : value_(value) {}
friend constexpr bool operator==(OneBitsIterator a, OneBitsIterator b) {
return a.value_ == b.value_;
}
friend constexpr bool operator!=(OneBitsIterator a, OneBitsIterator b) {
return !(a == b);
}
constexpr int operator*() const { return absl::countr_zero(value_); }
constexpr OneBitsIterator& operator++() {
Uint t = value_ & -value_;
value_ ^= t;
return *this;
}
constexpr OneBitsIterator operator++(int) {
auto copy = *this;
++*this;
return copy;
}
private:
Uint value_;
};
template <typename Uint>
class IndexView {
public:
IndexView(Uint bits) : bits_(bits) {}
using const_iterator = OneBitsIterator<Uint>;
using value_type = typename const_iterator::value_type;
using difference_type = typename const_iterator::difference_type;
using reference = typename const_iterator::reference;
constexpr const_iterator begin() const { return const_iterator(bits_); }
constexpr const_iterator end() const { return const_iterator(); }
constexpr int front() const { return *begin(); }
private:
Uint bits_;
};
}
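// SmallBitSet<N> is a fixed-size bit set backed by a single unsigned integer
// of at least N bits. A minimal usage sketch (illustrative only):
//
//   auto s = tensorstore::SmallBitSet<32>::FromIndices({1, 3, 10});
//   s.set(4);
//   for (int i : s.index_view()) { ... }  // visits 1, 3, 4, 10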
template <size_t N>
class SmallBitSet {
public:
using Uint = typename internal::uint_type<N>::type;
using value_type = bool;
using reference = BitRef<Uint>;
constexpr SmallBitSet() : bits_(0) {}
template <typename T,
typename = std::enable_if_t<std::is_same_v<T, bool>>>
constexpr SmallBitSet(T value) : bits_(value * ~Uint(0)) {}
static constexpr SmallBitSet FromUint(Uint bits) {
SmallBitSet v;
v.bits_ = bits;
return v;
}
template <size_t NumBits, typename = std::enable_if_t<(NumBits <= N)>>
static constexpr SmallBitSet FromIndices(const int (&positions)[NumBits]) {
return FromIndexRange(std::begin(positions), std::end(positions));
}
template <typename Range>
static constexpr SmallBitSet FromIndexRange(Range&& range) {
return FromIndexRange(range.begin(), range.end());
}
template <typename Iterator>
static constexpr SmallBitSet FromIndexRange(Iterator begin, Iterator end) {
SmallBitSet set;
while (begin != end) set.set(*begin++);
return set;
}
template <size_t NumBits, typename = std::enable_if_t<(NumBits <= N)>>
static constexpr SmallBitSet FromBools(const bool (&bits)[NumBits]) {
return FromBoolRange(std::begin(bits), std::end(bits));
}
template <typename Range>
static constexpr SmallBitSet FromBoolRange(Range&& range) {
return FromBoolRange(range.begin(), range.end());
}
template <typename Iterator>
static constexpr SmallBitSet FromBoolRange(Iterator begin, Iterator end) {
SmallBitSet set;
size_t i = 0;
while (begin != end) {
set.bits_ |= (*begin++ ? Uint(1) : Uint(0)) << i;
i++;
}
assert(i <= N);
return set;
}
static constexpr SmallBitSet UpTo(size_t k) {
assert(k <= N);
return k == 0 ? SmallBitSet()
: SmallBitSet::FromUint(~Uint(0) << (N - k) >> (N - k));
}
template <typename T,
typename = std::enable_if_t<std::is_same_v<T, bool>>>
constexpr SmallBitSet& operator=(T value) {
bits_ = ~Uint(0) * value;
return *this;
}
using BoolsView = bitset_impl::BoolsView<BitIterator<Uint>, N>;
constexpr BoolsView bools_view() ABSL_ATTRIBUTE_LIFETIME_BOUND {
return BoolsView(BitIterator<Uint>(&bits_, 0));
}
using ConstBoolsView = bitset_impl::BoolsView<BitIterator<const Uint>, N>;
constexpr ConstBoolsView bools_view() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
return ConstBoolsView(BitIterator<const Uint>(&bits_, 0));
}
using IndexView = bitset_impl::IndexView<Uint>;
constexpr IndexView index_view() const { return IndexView(bits_); }
constexpr static size_t size() { return N; }
constexpr size_t count() const { return absl::popcount(bits_); }
constexpr bool none() const { return bits_ == 0; }
constexpr bool any() const { return bits_ != 0; }
constexpr bool all() const { return bits_ == ~Uint(0); }
explicit operator bool() const { return any(); }
constexpr SmallBitSet& set() noexcept {
bits_ = ~Uint(0);
return *this;
}
constexpr SmallBitSet& reset() noexcept {
bits_ = 0;
return *this;
}
constexpr SmallBitSet& flip() noexcept {
bits_ = ~bits_;
return *this;
}
constexpr bool test(int pos) const noexcept {
assert(pos >= 0 && pos < N);
return (bits_ >> pos) & 1;
}
constexpr SmallBitSet& set(int pos) noexcept {
assert(pos >= 0 && pos < N);
bits_ |= (static_cast<Uint>(1) << pos);
return *this;
}
constexpr SmallBitSet& reset(int pos) noexcept {
assert(pos >= 0 && pos < N);
bits_ &= ~(static_cast<Uint>(1) << pos);
return *this;
}
constexpr SmallBitSet& flip(int pos) noexcept {
assert(pos >= 0 && pos < N);
bits_ ^= (static_cast<Uint>(1) << pos);
return *this;
}
constexpr reference operator[](size_t offset) ABSL_ATTRIBUTE_LIFETIME_BOUND {
    assert(offset < N);
return reference(&bits_, offset);
}
constexpr bool operator[](size_t offset) const {
    assert(offset < N);
return test(offset);
}
constexpr Uint to_uint() const { return bits_; }
friend constexpr SmallBitSet operator~(SmallBitSet v) {
return SmallBitSet::FromUint(~v.bits_);
}
friend constexpr SmallBitSet operator&(SmallBitSet a, SmallBitSet b) {
return SmallBitSet::FromUint(a.bits_ & b.bits_);
}
friend constexpr SmallBitSet& operator&=(SmallBitSet& a, SmallBitSet b) {
a.bits_ &= b.bits_;
return a;
}
friend constexpr SmallBitSet operator^(SmallBitSet a, SmallBitSet b) {
return SmallBitSet::FromUint(a.bits_ ^ b.bits_);
}
friend constexpr SmallBitSet& operator^=(SmallBitSet& a, SmallBitSet b) {
a.bits_ ^= b.bits_;
return a;
}
friend constexpr SmallBitSet operator|(SmallBitSet a, SmallBitSet b) {
return SmallBitSet::FromUint(a.bits_ | b.bits_);
}
friend constexpr SmallBitSet& operator|=(SmallBitSet& a, SmallBitSet b) {
a.bits_ |= b.bits_;
return a;
}
friend constexpr bool operator==(SmallBitSet a, SmallBitSet b) {
return a.bits_ == b.bits_;
}
friend constexpr bool operator!=(SmallBitSet a, SmallBitSet b) {
return !(a == b);
}
friend std::ostream& operator<<(std::ostream& os, SmallBitSet v) {
for (size_t i = 0; i < N; ++i) {
os << (static_cast<bool>(v[i]) ? '1' : '0');
}
return os;
}
private:
Uint bits_;
};
}
#endif | #include "tensorstore/util/small_bit_set.h"
#include <stdint.h>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::BitIterator;
using ::tensorstore::BitRef;
using BitSet = ::tensorstore::SmallBitSet<32>;
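// A mutable bit iterator converts to a const one, never the reverse.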
static_assert(
std::is_convertible_v<BitIterator<uint32_t>, BitIterator<const uint32_t>>);
static_assert(
!std::is_convertible_v<BitIterator<const uint32_t>, BitIterator<uint32_t>>);
TEST(BitRefTest, Basic) {
uint16_t data[2] = {0, 0};
BitRef<uint16_t> ref(data + 1, 19);
BitRef<uint16_t> ref2(data, 2);
BitRef<const uint16_t> const_ref(data, 3);
EXPECT_EQ(false, ref);
ref = true;
EXPECT_EQ(true, ref);
EXPECT_THAT(data, ::testing::ElementsAre(0, 8));
  data[0] = 0xffff;
  data[1] = 0xffff;
EXPECT_EQ(true, ref);
ref = false;
EXPECT_EQ(false, ref);
  EXPECT_THAT(data, ::testing::ElementsAre(0xffff,
                                           0xfff7));
ref = ref2;
  EXPECT_THAT(data, ::testing::ElementsAre(0xffff,
                                           0xffff));
data[0] = 0;
ref = const_ref;
  EXPECT_THAT(data, ::testing::ElementsAre(0, 0xfff7));
}
TEST(BitRefTest, Swap) {
uint16_t data[2] = {0, 0};
BitRef<uint16_t> ref(data + 1, 19);
BitRef<uint16_t> ref2(data, 2);
uint32_t data2 = 0;
ref = true;
ref2 = false;
EXPECT_THAT(data, ::testing::ElementsAre(0, 8));
using std::swap;
swap(ref, ref2);
EXPECT_EQ(false, ref);
EXPECT_EQ(true, ref2);
EXPECT_THAT(data, ::testing::ElementsAre(4, 0));
bool b = false;
swap(b, ref2);
EXPECT_THAT(data, ::testing::ElementsAre(0, 0));
EXPECT_EQ(true, b);
swap(ref2, b);
EXPECT_THAT(data, ::testing::ElementsAre(4, 0));
EXPECT_EQ(false, b);
BitRef<uint32_t> ref3(&data2, 1);
swap(ref2, ref3);
EXPECT_THAT(data, ::testing::ElementsAre(0, 0));
EXPECT_EQ(2, data2);
}
TEST(BitIteratorTest, Basic) {
uint16_t data[2] = {0, 0};
BitIterator<uint16_t> it(data, 19);
BitIterator<uint16_t> it2(data, 2);
BitIterator<const uint16_t> const_it(data, 3);
BitIterator<const uint16_t> const_it2 = it;
EXPECT_EQ(data, it.base());
EXPECT_EQ(data, it2.base());
EXPECT_EQ(data, const_it.base());
EXPECT_EQ(data, const_it2.base());
EXPECT_EQ(19, it.offset());
EXPECT_EQ(2, it2.offset());
EXPECT_EQ(3, const_it.offset());
EXPECT_EQ(19, const_it2.offset());
{
auto ref = *it;
static_assert(std::is_same_v<BitRef<uint16_t>, decltype(ref)>);
auto ref_subscript = it[0];
auto ref_subscript2 = it2[17];
static_assert(std::is_same_v<BitRef<uint16_t>, decltype(ref_subscript)>);
EXPECT_FALSE(ref_subscript);
EXPECT_FALSE(ref_subscript2);
ref = true;
EXPECT_TRUE(ref);
EXPECT_TRUE(ref_subscript);
EXPECT_TRUE(ref_subscript2);
    EXPECT_THAT(data, ::testing::ElementsAre(0, 0x8));
ref = false;
EXPECT_FALSE(ref);
EXPECT_THAT(data, ::testing::ElementsAre(0, 0));
data[1] = ~0x8;
EXPECT_FALSE(ref);
ref = true;
EXPECT_TRUE(ref);
EXPECT_THAT(data, ::testing::ElementsAre(0, 0xffff));
}
{
auto ref = *const_it;
static_assert(std::is_same_v<BitRef<const uint16_t>, decltype(ref)>);
EXPECT_FALSE(ref);
    data[0] = 0x8;
EXPECT_TRUE(ref);
data[0] = ~data[0];
EXPECT_FALSE(ref);
}
}
TEST(BitIteratorTest, IteratorPlusOffset) {
uint16_t data[2] = {0, 0};
auto it = BitIterator<uint16_t>(data, 3) + 5;
EXPECT_EQ(data, it.base());
EXPECT_EQ(8, it.offset());
}
TEST(BitIteratorTest, OffsetPlusIterator) {
uint16_t data[2] = {0, 0};
auto it = 5 + BitIterator<uint16_t>(data, 3);
EXPECT_EQ(data, it.base());
EXPECT_EQ(8, it.offset());
}
TEST(BitIteratorTest, IteratorMinusOffset) {
uint16_t data[2] = {0, 0};
auto it = BitIterator<uint16_t>(data, 7) - 2;
EXPECT_EQ(data, it.base());
EXPECT_EQ(5, it.offset());
}
TEST(BitIteratorTest, IteratorMinusIterator) {
uint16_t data[2] = {0, 0};
EXPECT_EQ(3, BitIterator<uint16_t>(data, 7) - BitIterator<uint16_t>(data, 4));
EXPECT_EQ(
3, BitIterator<uint16_t>(data, 7) - BitIterator<const uint16_t>(data, 4));
EXPECT_EQ(
3, BitIterator<const uint16_t>(data, 7) - BitIterator<uint16_t>(data, 4));
EXPECT_EQ(3, BitIterator<const uint16_t>(data, 7) -
BitIterator<const uint16_t>(data, 4));
}
TEST(BitIteratorTest, PreIncrement) {
uint16_t data[2] = {0, 0};
BitIterator<uint16_t> it(data, 19);
auto& x = ++it;
EXPECT_EQ(&it, &x);
EXPECT_EQ(20, it.offset());
EXPECT_EQ(data, it.base());
}
TEST(BitIteratorTest, PreDecrement) {
uint16_t data[2] = {0, 0};
BitIterator<uint16_t> it(data, 19);
auto& x = --it;
EXPECT_EQ(&it, &x);
EXPECT_EQ(18, it.offset());
EXPECT_EQ(data, it.base());
}
TEST(BitIteratorTest, PostIncrement) {
uint16_t data[2] = {0, 0};
BitIterator<uint16_t> it(data, 19);
EXPECT_EQ(BitIterator<uint16_t>(data, 19), it++);
EXPECT_EQ(20, it.offset());
EXPECT_EQ(data, it.base());
}
TEST(BitIteratorTest, PostDecrement) {
uint16_t data[2] = {0, 0};
BitIterator<uint16_t> it(data, 19);
EXPECT_EQ(BitIterator<uint16_t>(data, 19), it--);
EXPECT_EQ(18, it.offset());
EXPECT_EQ(data, it.base());
}
TEST(BitIteratorTest, Comparison) {
uint16_t data[2] = {0, 0};
EXPECT_EQ(BitIterator<uint16_t>(data, 3), BitIterator<uint16_t>(data, 3));
EXPECT_EQ(BitIterator<uint16_t>(data, 3),
BitIterator<const uint16_t>(data, 3));
EXPECT_NE(BitIterator<uint16_t>(data, 3), BitIterator<uint16_t>(data, 4));
EXPECT_NE(BitIterator<uint16_t>(data, 3),
BitIterator<const uint16_t>(data, 4));
EXPECT_TRUE(BitIterator<uint16_t>(data, 3) < BitIterator<uint16_t>(data, 4));
EXPECT_TRUE(BitIterator<uint16_t>(data, 3) <
BitIterator<const uint16_t>(data, 4));
EXPECT_FALSE(BitIterator<uint16_t>(data, 3) < BitIterator<uint16_t>(data, 3));
EXPECT_TRUE(BitIterator<uint16_t>(data, 3) <= BitIterator<uint16_t>(data, 4));
EXPECT_TRUE(BitIterator<uint16_t>(data, 3) <= BitIterator<uint16_t>(data, 3));
EXPECT_FALSE(BitIterator<uint16_t>(data, 3) <=
BitIterator<uint16_t>(data, 2));
EXPECT_TRUE(BitIterator<uint16_t>(data, 3) > BitIterator<uint16_t>(data, 2));
EXPECT_FALSE(BitIterator<uint16_t>(data, 3) > BitIterator<uint16_t>(data, 3));
EXPECT_TRUE(BitIterator<uint16_t>(data, 3) >= BitIterator<uint16_t>(data, 2));
EXPECT_TRUE(BitIterator<uint16_t>(data, 3) >= BitIterator<uint16_t>(data, 3));
EXPECT_FALSE(BitIterator<uint16_t>(data, 2) >=
BitIterator<uint16_t>(data, 3));
}
TEST(SmallBitSetTest, DefaultConstruct) {
BitSet v;
EXPECT_FALSE(v);
EXPECT_EQ(0, v.to_uint());
EXPECT_EQ(v, v);
BitSet v_true = true;
EXPECT_EQ(v_true, v_true);
EXPECT_NE(v, v_true);
EXPECT_THAT(v.bools_view(),
::testing::ElementsAreArray(std::vector<bool>(32)));
}
TEST(SmallBitSetTest, FromUint) {
auto v = BitSet::FromUint(0b11'0111);
EXPECT_TRUE(v);
EXPECT_EQ(0b110111, v.to_uint());
EXPECT_EQ(true, v[0]);
EXPECT_EQ(true, v[1]);
EXPECT_EQ(true, v[2]);
EXPECT_EQ(false, v[3]);
EXPECT_EQ(true, v[4]);
EXPECT_EQ(true, v[5]);
EXPECT_THAT(v.bools_view(), ::testing::ElementsAre(1, 1, 1, 0, 1, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0));
EXPECT_THAT(const_cast<const BitSet&>(v).bools_view(),
::testing::ElementsAre(1, 1, 1, 0, 1, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0));
EXPECT_EQ(
"11101100"
"00000000"
"00000000"
"00000000",
tensorstore::StrCat(v));
EXPECT_EQ(0b11111111'11111111'11111111'11001000, (~v).to_uint());
auto v1 = BitSet::FromUint(0b101'1100);
EXPECT_EQ(0b111'1111, (v | v1).to_uint());
EXPECT_EQ(0b001'0100, (v & v1).to_uint());
EXPECT_EQ(0b110'1011, (v ^ v1).to_uint());
auto v2 = v1;
v2 |= v;
EXPECT_EQ(0b111'1111, v2.to_uint());
v2 = v1;
v2 &= v;
EXPECT_EQ(0b001'0100, v2.to_uint());
v2 = v1;
v2 ^= v;
EXPECT_EQ(0b110'1011, v2.to_uint());
}
TEST(SmallBitSetTest, BracedList) {
auto v = BitSet::FromBools({0, 1, 1, 0, 0, 1});
EXPECT_EQ(0b100110, v.to_uint());
}
TEST(SmallBitSetTest, Reference) {
BitSet v;
v[2] = true;
EXPECT_TRUE(v[2]);
EXPECT_FALSE(v[0]);
EXPECT_EQ(0b100, v.to_uint());
}
TEST(SmallBitSetTest, UpTo) {
EXPECT_EQ(0x00000000, BitSet::UpTo(0).to_uint());
EXPECT_EQ(0x00000001, BitSet::UpTo(1).to_uint());
EXPECT_EQ(0x0000ffff, BitSet::UpTo(16).to_uint());
EXPECT_EQ(0x7fffffff, BitSet::UpTo(31).to_uint());
EXPECT_EQ(0xffffffff, BitSet::UpTo(32).to_uint());
EXPECT_EQ(1, BitSet::UpTo(1).count());
}
TEST(SmallBitSetTest, FromIndices) {
BitSet v = BitSet::FromIndices({1, 3, 10});
EXPECT_FALSE(v.none());
EXPECT_EQ(3, v.count());
EXPECT_EQ((static_cast<uint32_t>(1) << 1) | (static_cast<uint32_t>(1) << 3) |
(static_cast<uint32_t>(1) << 10),
v.to_uint());
EXPECT_THAT(v.index_view(), ::testing::ElementsAre(1, 3, 10));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/small_bit_set.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/small_bit_set_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
3e216e1c-4bba-4884-a26e-b3fea70d8fbb | cpp | tensorflow/tensorflow | quantize_training | tensorflow/core/common_runtime/quantize_training.cc | tensorflow/core/common_runtime/quantize_training_test.cc | #include "tensorflow/core/common_runtime/quantize_training.h"
#include <algorithm>
#include <atomic>
#include <set>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/memory_types.h"
#include "tensorflow/core/framework/log_memory.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
const uint32 kAllowedInputs = 2;
const float kEMADecay = 0.999;
const auto* nodes_to_rewrite =
new std::unordered_set<string, StringPieceHasher>{"MatMul", "Conv2D"};
struct EdgeToConvert {
const Edge* edge;
int32 num_bits;
bool signed_input;
bool range_given;
float input_min;
float input_max;
EdgeToConvert(const Edge* e, int32_t bits, bool sign, bool range, float min,
float max)
: edge(e),
num_bits(bits),
signed_input(sign),
range_given(range),
input_min(min),
input_max(max) {}
};
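// Nodes created by the gradient rewriter share the "gradients" name prefix
// and are skipped during quantization.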
inline bool IsGradientNode(const Graph* graph, const Node* node) {
static const string tag = "gradients";
return (node->name().compare(0, tag.size(), tag) == 0);
}
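// Infers quantization properties from the producing op: activations with a
// statically known output range (Relu6, Sigmoid, Tanh) get fixed min/max,
// pass-through ops (Identity, pooling, Reshape, ConcatV2) recurse into their
// inputs, and unrecognized ops return false.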
bool FindType(const Graph* graph, const Node* node, bool* signed_input,
bool* range_given, float* input_min, float* input_max) {
const string& src_op = node->type_string();
if (src_op == "Const" || src_op == "Variable" || src_op == "VariableV2") {
*signed_input = true;
*range_given = false;
} else if (src_op == "Relu") {
*signed_input = false;
*range_given = false;
} else if (src_op == "Relu6") {
*signed_input = false;
*range_given = true;
*input_min = 0;
*input_max = 6;
} else if (src_op == "Sigmoid") {
*signed_input = false;
*range_given = true;
*input_min = 0;
*input_max = 1;
} else if (src_op == "Tanh") {
*signed_input = true;
*range_given = true;
*input_min = -1;
*input_max = 1;
} else if (src_op == "Reshape" || src_op == "ConcatV2") {
for (const Edge* edge : node->in_edges()) {
if (edge->src_output() != Graph::kControlSlot && edge->dst_input() == 0) {
FindType(graph, edge->src(), signed_input, range_given, input_min,
input_max);
}
}
} else if (src_op == "Identity" || src_op == "MaxPool" ||
src_op == "AvgPool" || src_op == "MaxPool3D" ||
src_op == "AvgPool3D") {
for (const Edge* edge : node->in_edges()) {
if (edge->src_output() != Graph::kControlSlot) {
FindType(graph, edge->src(), signed_input, range_given, input_min,
input_max);
}
}
} else {
*signed_input = true;
*range_given = false;
return false;
}
return true;
}
Status FindSaveOp(const Graph* graph, Node** save_op,
std::vector<const Edge*>* in_edges, bool* found) {
*found = false;
for (Node* node : graph->op_nodes()) {
if (node->type_string() == "SaveV2") {
if (*found) {
return errors::InvalidArgument("Input graph has multiple SaveV2 ops.");
}
*save_op = node;
*found = true;
TF_RETURN_IF_ERROR(node->input_edges(in_edges));
}
}
return absl::OkStatus();
}
Node* FindRestoreAllOp(const Graph* graph, StringPiece save_prefix) {
for (Node* node : graph->op_nodes()) {
if (node->name() == strings::StrCat(save_prefix, "/restore_all")) {
return node;
}
}
return nullptr;
}
StringPiece GetNodeNamePrefix(const Node* node) {
StringPiece name = node->name();
return name.substr(0, name.rfind('/'));
}
void FillStringTensor(Tensor* dst, const Tensor& src) {
auto dst_flat = dst->flat<tstring>();
auto src_flat = src.flat<tstring>();
for (int i = 0; i < src.NumElements(); i++) {
dst_flat(i) = src_flat(i);
}
}
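// Rebuilds the SaveV2 op so the variables added by the rewrite are
// checkpointed with the original tensors: the tensor_names and
// shape_and_slices const inputs are widened and each new variable is
// appended as an extra data input.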
Status ConnectVariablesToSaveOp(Graph* graph, Node* save_op,
const std::vector<const Edge*>& in_edges,
const std::vector<Node*>& added_variables) {
Node* tensor_names_op = in_edges[1]->src();
Node* shape_and_slices_op = in_edges[2]->src();
Tensor tensor_names;
Tensor shape_and_slices;
TF_RETURN_IF_ERROR(
GetNodeAttr(tensor_names_op->attrs(), "value", &tensor_names));
TF_RETURN_IF_ERROR(
GetNodeAttr(shape_and_slices_op->attrs(), "value", &shape_and_slices));
int tn_size = tensor_names.NumElements();
int var_size = added_variables.size();
NodeBuilder save_op_builder =
NodeBuilder(save_op->name(), save_op->type_string());
for (int i = 0; i < 3; i++) {
save_op_builder = save_op_builder.Input(in_edges[i]->src());
}
std::vector<NodeBuilder::NodeOut> var_nodeouts;
var_nodeouts.reserve(tn_size + var_size);
for (int i = 3; i < in_edges.size(); i++) {
var_nodeouts.emplace_back(in_edges[i]->src());
}
Tensor new_tensor_names(DT_STRING, TensorShape({tn_size + var_size}));
Tensor new_shape_and_slices(DT_STRING, TensorShape({tn_size + var_size}));
FillStringTensor(&new_tensor_names, tensor_names);
FillStringTensor(&new_shape_and_slices, shape_and_slices);
for (int i = 0; i < var_size; i++) {
Node* var = added_variables[i];
new_tensor_names.flat<tstring>()(tn_size + i) = var->name();
new_shape_and_slices.flat<tstring>()(tn_size + i) = "";
var_nodeouts.emplace_back(var);
}
save_op_builder = save_op_builder.Input(var_nodeouts);
tensor_names_op->AddAttr("value", new_tensor_names);
shape_and_slices_op->AddAttr("value", new_shape_and_slices);
Node* new_save_op;
TF_RETURN_IF_ERROR(save_op_builder.Finalize(graph, &new_save_op));
for (const Edge* edge : save_op->out_edges()) {
graph->AddControlEdge(new_save_op, edge->dst());
}
graph->RemoveNode(save_op);
return absl::OkStatus();
}
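// For each added variable, builds a RestoreV2 + Assign pair and wires it
// into the existing "<prefix>/restore_all" NoOp so that restoring a
// checkpoint also restores the quantization ranges.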
Status AddRestoreVariableSubgraphs(Graph* graph, Node* save_op,
const std::vector<const Edge*>& in_edges,
const std::vector<Node*>& variables) {
Node* prefix_op = in_edges[0]->src();
StringPiece name_prefix = GetNodeNamePrefix(save_op);
Node* restore_all = FindRestoreAllOp(graph, name_prefix);
if (restore_all == nullptr) {
return errors::InvalidArgument("graph has SaveOp, but no restore_all NoOp");
}
const string restore_op_name = strings::StrCat(name_prefix, "/RestoreV2");
const string assign_op_name = strings::StrCat(name_prefix, "/Assign");
for (Node* var : variables) {
string new_restore_op_name =
strings::StrCat(graph->NewName(restore_op_name), "_qt");
string new_assign_op_name =
strings::StrCat(graph->NewName(assign_op_name), "_qt");
string tensor_names_op_name =
strings::StrCat(new_restore_op_name, "/tensor_names");
string shape_and_slices_op_name =
strings::StrCat(new_restore_op_name, "/shape_and_slices");
Node* tensor_names;
Tensor tensor_names_val(DT_STRING, TensorShape({1}));
tensor_names_val.flat<tstring>()(0) = var->name();
TF_RETURN_IF_ERROR(NodeBuilder(tensor_names_op_name, "Const")
.Attr("dtype", DT_STRING)
.Attr("value", tensor_names_val)
.Finalize(graph, &tensor_names));
Node* shape_and_slices;
Tensor shape_and_slices_val(DT_STRING, TensorShape({1}));
shape_and_slices_val.flat<tstring>()(0) = "";
TF_RETURN_IF_ERROR(NodeBuilder(shape_and_slices_op_name, "Const")
.Attr("dtype", DT_STRING)
.Attr("value", shape_and_slices_val)
.Finalize(graph, &shape_and_slices));
Node* restore_op;
TF_RETURN_IF_ERROR(NodeBuilder(new_restore_op_name, "RestoreV2")
.Input(prefix_op)
.Input(tensor_names)
.Input(shape_and_slices)
.Attr("dtypes", {DT_FLOAT})
.Finalize(graph, &restore_op));
Node* assign_op;
TF_RETURN_IF_ERROR(NodeBuilder(new_assign_op_name, "Assign")
.Input(var)
.Input(restore_op)
.Finalize(graph, &assign_op));
graph->AddControlEdge(assign_op, restore_all);
}
return absl::OkStatus();
}
Status AddSaveAndRestore(Graph* graph, const std::vector<Node*>& variables) {
Node* save_op = nullptr;
std::vector<const Edge*> in_edges;
bool found = false;
TF_RETURN_IF_ERROR(FindSaveOp(graph, &save_op, &in_edges, &found));
if (found) {
TF_RETURN_IF_ERROR(
AddRestoreVariableSubgraphs(graph, save_op, in_edges, variables));
TF_RETURN_IF_ERROR(
ConnectVariablesToSaveOp(graph, save_op, in_edges, variables));
}
return absl::OkStatus();
}
Status MakeReductionAxes(Graph* graph, string name_prefix, Node* input,
Node** output) {
name_prefix = strings::StrCat(name_prefix, "/ReductionAxes");
Node* start;
Tensor zero_tensor(DT_INT32, TensorShape());
zero_tensor.flat<int32>()(0) = 0;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/RangeStart"), "Const")
.Attr("dtype", DT_INT32)
.Attr("value", zero_tensor)
.Finalize(graph, &start));
Node* delta;
Tensor one_tensor(DT_INT32, TensorShape());
one_tensor.flat<int32>()(0) = 1;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/RangeDelta"), "Const")
.Attr("dtype", DT_INT32)
.Attr("value", one_tensor)
.Finalize(graph, &delta));
Node* rank;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/InputRank"), "Rank")
.Input(input)
.Finalize(graph, &rank));
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/ReductionAxes"), "Range")
.Input(start)
.Input(rank)
.Input(delta)
.Finalize(graph, output));
return absl::OkStatus();
}
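// Emits the update  var' = var - (1 - decay) * (var - value)
//                        = decay * var + (1 - decay) * value,
// i.e. a standard exponential moving average driven by kEMADecay.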
Status MakeExponentialMovingAverage(Graph* graph, string name_prefix,
const NodeBuilder::NodeOut& input,
Node* decay, Node* update_variable,
Node** assign_value) {
name_prefix = strings::StrCat(name_prefix, "/EMA");
Node* one;
Tensor one_tensor(DT_FLOAT, TensorShape());
one_tensor.flat<float>()(0) = 1.0;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/OneConst"), "Const")
.Attr("dtype", DT_FLOAT)
.Attr("value", one_tensor)
.Finalize(graph, &one));
Node* decay_complement;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/DecayComplement"), "Sub")
.Input(one)
.Input(decay)
.Finalize(graph, &decay_complement));
Node* value_diff;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/ValueDiff"), "Sub")
.Input(update_variable)
.Input(input)
.Finalize(graph, &value_diff));
Node* update_value;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/UpdateValue"), "Mul")
.Input(value_diff)
.Input(decay_complement)
.Finalize(graph, &update_value));
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/EMAValue"), "Sub")
.Input(update_variable)
.Input(update_value)
.Finalize(graph, assign_value));
return absl::OkStatus();
}
Status MakeInitializedEMAVariable(Graph* graph, const string& name, Node* decay,
Node* init_val,
std::vector<Node*>* added_variables,
Node** var) {
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name, "/Variable"), "VariableV2")
.Attr("shape", TensorShape())
.Attr("dtype", DT_FLOAT)
.Finalize(graph, var));
added_variables->push_back(*var);
Node* is_initialized;
TF_RETURN_IF_ERROR(NodeBuilder(strings::StrCat(name, "/IsInitialized"),
"IsVariableInitialized")
.Input(*var)
.Finalize(graph, &is_initialized));
Node* switch_node;
TF_RETURN_IF_ERROR(NodeBuilder(strings::StrCat(name, "/Switch"), "Switch")
.Input(init_val)
.Input(is_initialized)
.Finalize(graph, &switch_node));
NodeBuilder::NodeOut output_false = NodeBuilder::NodeOut(switch_node, 0);
NodeBuilder::NodeOut output_true = NodeBuilder::NodeOut(switch_node, 1);
Node* ema_value;
TF_RETURN_IF_ERROR(MakeExponentialMovingAverage(graph, name, output_true,
decay, *var, &ema_value));
Node* assign_value;
TF_RETURN_IF_ERROR(NodeBuilder(strings::StrCat(name, "/Merge"), "Merge")
.Input({output_false, ema_value})
.Finalize(graph, &assign_value));
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name, "/AssignValue"), "Assign")
.Input(*var)
.Input(assign_value)
.Finalize(graph, var));
return absl::OkStatus();
}
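// Tracks the running min and max of `input` via EMA-updated variables; the
// reduction over all axes collapses the tensor to scalar extrema.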
Status MakeEMAMinMaxVars(Graph* graph, const string& name_prefix, Node* input,
std::vector<Node*>* added_variables, Node** min_var,
Node** max_var) {
Tensor decay_tensor(DT_FLOAT, TensorShape());
decay_tensor.flat<float>()(0) = kEMADecay;
Node* decay;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/Decay"), "Const")
.Attr("dtype", DT_FLOAT)
.Attr("value", decay_tensor)
.Finalize(graph, &decay));
Node* reduction_axes;
TF_RETURN_IF_ERROR(
MakeReductionAxes(graph, name_prefix, input, &reduction_axes));
Node* min;
string min_name = strings::StrCat(name_prefix, "/Min");
TF_RETURN_IF_ERROR(NodeBuilder(min_name, "Min")
.Input(input)
.Input(reduction_axes)
.Finalize(graph, &min));
Node* max;
string max_name = strings::StrCat(name_prefix, "/Max");
TF_RETURN_IF_ERROR(NodeBuilder(max_name, "Max")
.Input(input)
.Input(reduction_axes)
.Finalize(graph, &max));
TF_RETURN_IF_ERROR(MakeInitializedEMAVariable(graph, min_name, decay, min,
added_variables, min_var));
TF_RETURN_IF_ERROR(MakeInitializedEMAVariable(graph, max_name, decay, max,
added_variables, max_var));
return absl::OkStatus();
}
Status MakeInputMinMax(Graph* graph, const string& name_prefix,
const EdgeToConvert& edge,
std::vector<Node*>* added_variables, Node** input_min,
Node** input_max) {
if (edge.range_given) {
Tensor input_min_tensor(DT_FLOAT, TensorShape());
input_min_tensor.flat<float>()(0) = edge.input_min;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/InputMin"), "Const")
.Attr("dtype", DT_FLOAT)
.Attr("value", input_min_tensor)
.Finalize(graph, input_min));
Tensor input_max_tensor(DT_FLOAT, TensorShape());
input_max_tensor.flat<float>()(0) = edge.input_max;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/InputMax"), "Const")
.Attr("dtype", DT_FLOAT)
.Attr("value", input_max_tensor)
.Finalize(graph, input_max));
} else {
TF_RETURN_IF_ERROR(MakeEMAMinMaxVars(graph, name_prefix, edge.edge->src(),
added_variables, input_min,
input_max));
}
return absl::OkStatus();
}
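// Inserts the requested quantization op. With a statically known range the
// min/max inputs are constants; otherwise they come from the EMA variables
// built above.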
Status MakeQuantizeOp(Graph* graph, const string& name_prefix,
const string& quant_op_type, const EdgeToConvert& edge,
std::vector<Node*>* added_variables,
Node** convert_node) {
Node* input_min;
Node* input_max;
TF_RETURN_IF_ERROR(MakeInputMinMax(graph, name_prefix, edge, added_variables,
&input_min, &input_max));
string quant_name = strings::StrCat(name_prefix, "/", quant_op_type);
if (quant_op_type == "QuantizeAndDequantizeV2") {
TF_RETURN_IF_ERROR(NodeBuilder(quant_name, quant_op_type)
.Input(edge.edge->src())
.Input(input_min)
.Input(input_max)
.Attr("signed_input", edge.signed_input)
.Attr("num_bits", edge.num_bits)
.Attr("range_given", true)
.Finalize(graph, convert_node));
} else if (quant_op_type == "FakeQuantWithMinMaxVars") {
TF_RETURN_IF_ERROR(NodeBuilder(quant_name, quant_op_type)
.Input(edge.edge->src())
.Input(input_min)
.Input(input_max)
.Attr("num_bits", edge.num_bits)
.Finalize(graph, convert_node));
} else {
return errors::InvalidArgument("Unknown quant op type: ", quant_op_type);
}
return absl::OkStatus();
}
Status ProcessTargetEdges(Graph* graph, const string& quant_op_type,
const std::vector<EdgeToConvert>& target_edges) {
std::unordered_map<string, Node*, StringPieceHasher> name_index;
std::vector<Node*> added_variables;
  for (const EdgeToConvert& edge : target_edges) {
Node* convert_node;
string name_prefix = edge.edge->src()->name();
auto iter = name_index.find(name_prefix);
if (iter == name_index.end()) {
TF_RETURN_IF_ERROR(MakeQuantizeOp(graph, name_prefix, quant_op_type, edge,
&added_variables, &convert_node));
name_index[name_prefix] = convert_node;
} else {
convert_node = iter->second;
}
graph->AddEdge(convert_node, 0, edge.edge->dst(), edge.edge->dst_input());
graph->RemoveEdge(edge.edge);
}
TF_RETURN_IF_ERROR(AddSaveAndRestore(graph, added_variables));
return absl::OkStatus();
}
}
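// Rewrites every non-control input edge of MatMul/Conv2D nodes (outside the
// gradient subgraph) to pass through a quantize/dequantize op. A minimal
// call sketch (illustrative only):
//
//   Graph* graph = ...;  // built elsewhere
//   TF_RETURN_IF_ERROR(
//       DoQuantizeTraining(/*num_bits=*/8, "QuantizeAndDequantizeV2", graph));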
Status DoQuantizeTraining(int32_t num_bits, const string& quant_op_type,
Graph* graph) {
if (graph == nullptr) {
return errors::InvalidArgument("Cannot accept empty graph pointer.");
}
if (num_bits < 1 || num_bits > 63) {
return errors::OutOfRange("num_bits should be in range [1, 63] but is: ",
num_bits);
}
int potential_input = 0;
std::vector<EdgeToConvert> target_edges;
for (Node* node : graph->nodes()) {
if (nodes_to_rewrite->find(node->type_string()) !=
nodes_to_rewrite->end() &&
!IsGradientNode(graph, node)) {
for (const Edge* edge : node->in_edges()) {
if (edge->src_output() == Graph::kControlSlot) {
continue;
} else {
bool signed_input = false;
bool range_given = false;
float input_min = 0;
float input_max = 0;
bool known_op = FindType(graph, edge->src(), &signed_input,
&range_given, &input_min, &input_max);
if (!known_op) {
potential_input++;
if (potential_input > kAllowedInputs) {
return errors::Unimplemented(
"Found an unknown op: ", edge->src()->name(),
" with type: ", edge->src()->type_string(),
"; Unknown ops are considered as model input for now and "
"only ",
kAllowedInputs, " inputs are supported currently.");
}
}
        target_edges.emplace_back(edge, num_bits, signed_input, range_given,
                                  input_min, input_max);
}
}
}
}
TF_RETURN_IF_ERROR(ProcessTargetEdges(graph, quant_op_type, target_edges));
return absl::OkStatus();
}
Status DoQuantizeTrainingOnGraphDef(const GraphDef& input_graphdef,
int32_t num_bits,
const string& quant_op_type,
GraphDef* result_graphdef) {
Graph graph(OpRegistry::Global());
GraphConstructorOptions opts;
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(opts, input_graphdef, &graph));
TF_RETURN_IF_ERROR(DoQuantizeTraining(num_bits, quant_op_type, &graph));
graph.ToGraphDef(result_graphdef);
return absl::OkStatus();
}
Status DoQuantizeTrainingOnSerializedGraphDef(const string& input_graph_string,
int32_t num_bits,
const string& quant_op_type,
string* result_graph_string) {
GraphDef input_graphdef;
if (!ParseProtoUnlimited(&input_graphdef, input_graph_string)) {
return errors::InvalidArgument(
"input_graph_string is not a serialized GraphDef protocol buffer");
}
GraphDef output_graphdef;
TF_RETURN_IF_ERROR(DoQuantizeTrainingOnGraphDef(
input_graphdef, num_bits, quant_op_type, &output_graphdef));
if (!output_graphdef.SerializeToString(result_graph_string)) {
return errors::Internal(
"quantize training transformation resulted in invalid GraphDef");
}
return absl::OkStatus();
}
} | #include "tensorflow/core/common_runtime/quantize_training.h"
#include <map>
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
class QuantizeTrainingTest : public ::testing::Test {
protected:
QuantizeTrainingTest() { Reset(); }
void Reset() { g_.reset(new Graph(OpRegistry::Global())); }
template <typename T>
Node* Constant(gtl::ArraySlice<T> values, TensorShape shape) {
return test::graph::Constant(g_.get(), test::AsTensor(values, shape));
}
Status Placeholder(Graph* g, const string& name, TensorShape shape,
Node** out) {
TF_RETURN_IF_ERROR(NodeBuilder(name, "Placeholder")
.Attr("dtype", DT_FLOAT)
.Attr("shape", shape)
.Finalize(g, out));
return absl::OkStatus();
}
Status FindNode(Graph* g, const string& name, Node** out) {
for (Node* node : g->nodes()) {
if (node->name() == name) {
*out = node;
return absl::OkStatus();
}
}
return errors::Unimplemented("Node ", name, " not found.");
}
std::unique_ptr<Graph> g_;
};
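// The expected node counts below include the inserted quantize ops plus the
// EMA min/max subgraphs (variables, initialization switches, and assigns)
// added for each input without a statically known range.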
TEST_F(QuantizeTrainingTest, SignedInput) {
Reset();
Graph* g = g_.get();
Node* a = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* b = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
g->AddControlEdge(g->source_node(), a);
g->AddControlEdge(g->source_node(), b);
Node* relu = test::graph::Relu(g, a);
Node* identity = test::graph::Identity(g, b);
Node* m1 = test::graph::Matmul(g, relu, identity, false, false);
g->AddControlEdge(m1, g->sink_node());
const int num_bits = 8;
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "QuantizeAndDequantizeV2", g));
EXPECT_EQ(63, g->num_nodes());
Node* identity_q_node;
TF_ASSERT_OK(
FindNode(g, strings::StrCat(identity->name(), "/QuantizeAndDequantizeV2"),
&identity_q_node));
ASSERT_EQ("true",
SummarizeAttrValue(*identity_q_node->attrs().Find("signed_input")));
Node* relu_q_node;
TF_ASSERT_OK(
FindNode(g, strings::StrCat(relu->name(), "/QuantizeAndDequantizeV2"),
&relu_q_node));
ASSERT_EQ("false",
SummarizeAttrValue(*relu_q_node->attrs().Find("signed_input")));
}
TEST_F(QuantizeTrainingTest, RangeGivenTrue) {
Reset();
Graph* g = g_.get();
Node* a = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* b = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
g->AddControlEdge(g->source_node(), a);
g->AddControlEdge(g->source_node(), b);
Node* relu = test::graph::Relu(g, a);
Node* relu6 = test::graph::Relu6(g, b);
Node* m1 = test::graph::Matmul(g, relu, relu6, false, false);
g->AddControlEdge(m1, g->sink_node());
const int num_bits = 8;
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "QuantizeAndDequantizeV2", g));
EXPECT_EQ(38, g->num_nodes());
Node* relu6_q_node;
TF_ASSERT_OK(
FindNode(g, strings::StrCat(relu6->name(), "/QuantizeAndDequantizeV2"),
&relu6_q_node));
ASSERT_EQ("true",
SummarizeAttrValue(*relu6_q_node->attrs().Find("range_given")));
Node* relu_q_node;
TF_ASSERT_OK(
FindNode(g, strings::StrCat(relu->name(), "/QuantizeAndDequantizeV2"),
&relu_q_node));
ASSERT_EQ("true",
SummarizeAttrValue(*relu_q_node->attrs().Find("range_given")));
}
TEST_F(QuantizeTrainingTest, WithBackwardNodes_QuantizeAndDequantize) {
Reset();
Graph* g = g_.get();
Node* a = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* b = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* c = Constant<float>({0.0, 1.0, 1.0, 0.0}, {2, 2});
Node* d = Constant<float>({0.0, 1.0, 1.0, 0.0}, {2, 2});
g->AddControlEdge(g->source_node(), a);
g->AddControlEdge(g->source_node(), b);
g->AddControlEdge(g->source_node(), c);
g->AddControlEdge(g->source_node(), d);
Node* relu = test::graph::Relu(g, a);
Node* identity = test::graph::Identity(g, b);
Node* m1 = test::graph::Matmul(g, relu, identity, false, false);
Node* m2 = test::graph::Matmul(g, identity, c, false, false);
g->AddControlEdge(m1, g->sink_node());
g->AddControlEdge(m2, g->sink_node());
Node* backward_m;
TF_ASSERT_OK(NodeBuilder(g->NewName("gradients/n"), "MatMul")
.Input(d)
.Input(m2)
.Attr("transpose_a", true)
.Attr("transpose_b", false)
.Finalize(g, &backward_m));
g->AddControlEdge(backward_m, g->sink_node());
int num_bits = 8;
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "QuantizeAndDequantizeV2", g));
EXPECT_EQ(95, g->num_nodes());
Node* found_node;
Status s = FindNode(g, strings::StrCat(d->name(), "/QuantizeAndDequantizeV2"),
&found_node);
EXPECT_TRUE(absl::StrContains(s.ToString(), "not found")) << s;
TF_ASSERT_OK(
FindNode(g, strings::StrCat(relu->name(), "/QuantizeAndDequantizeV2"),
&found_node));
TF_ASSERT_OK(
FindNode(g, strings::StrCat(identity->name(), "/QuantizeAndDequantizeV2"),
&found_node));
TF_ASSERT_OK(FindNode(
g, strings::StrCat(c->name(), "/QuantizeAndDequantizeV2"), &found_node));
}
TEST_F(QuantizeTrainingTest, WithBackwardNodes_FakeQuant) {
Reset();
Graph* g = g_.get();
Node* a = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* b = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* c = Constant<float>({0.0, 1.0, 1.0, 0.0}, {2, 2});
Node* d = Constant<float>({0.0, 1.0, 1.0, 0.0}, {2, 2});
g->AddControlEdge(g->source_node(), a);
g->AddControlEdge(g->source_node(), b);
g->AddControlEdge(g->source_node(), c);
g->AddControlEdge(g->source_node(), d);
Node* relu = test::graph::Relu(g, a);
Node* identity = test::graph::Identity(g, b);
Node* m1 = test::graph::Matmul(g, relu, identity, false, false);
Node* m2 = test::graph::Matmul(g, identity, c, false, false);
g->AddControlEdge(m1, g->sink_node());
g->AddControlEdge(m2, g->sink_node());
Node* backward_m;
TF_ASSERT_OK(NodeBuilder(g->NewName("gradients/n"), "MatMul")
.Input(d)
.Input(m2)
.Attr("transpose_a", true)
.Attr("transpose_b", false)
.Finalize(g, &backward_m));
g->AddControlEdge(backward_m, g->sink_node());
int num_bits = 8;
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "FakeQuantWithMinMaxVars", g));
EXPECT_EQ(95, g->num_nodes());
Node* found_node;
Status s = FindNode(g, strings::StrCat(d->name(), "/FakeQuantWithMinMaxVars"),
&found_node);
EXPECT_TRUE(absl::StrContains(s.ToString(), "not found")) << s;
TF_ASSERT_OK(
FindNode(g, strings::StrCat(relu->name(), "/FakeQuantWithMinMaxVars"),
&found_node));
TF_ASSERT_OK(
FindNode(g, strings::StrCat(identity->name(), "/FakeQuantWithMinMaxVars"),
&found_node));
TF_ASSERT_OK(FindNode(
g, strings::StrCat(c->name(), "/FakeQuantWithMinMaxVars"), &found_node));
}
TEST_F(QuantizeTrainingTest, QuantizeSerializedGraphDef) {
Reset();
Graph* graph = g_.get();
Node* const_a = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* const_b = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
graph->AddControlEdge(graph->source_node(), const_a);
graph->AddControlEdge(graph->source_node(), const_b);
Node* relu = test::graph::Relu(graph, const_a);
Node* identity = test::graph::Identity(graph, const_b);
Node* matmul = test::graph::Matmul(graph, relu, identity, false, false);
graph->AddControlEdge(matmul, graph->sink_node());
int num_bits = 8;
GraphDef input_graph;
graph->ToGraphDef(&input_graph);
string input_string;
input_graph.SerializeToString(&input_string);
string result_string;
TF_ASSERT_OK(DoQuantizeTrainingOnSerializedGraphDef(
input_string, num_bits, "QuantizeAndDequantizeV2", &result_string));
GraphDef result_graphdef;
EXPECT_TRUE(ParseProtoUnlimited(&result_graphdef, result_string));
GraphConstructorOptions opts;
Graph result_graph(OpRegistry::Global());
TF_ASSERT_OK(ConvertGraphDefToGraph(opts, result_graphdef, &result_graph));
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "QuantizeAndDequantizeV2", graph));
EXPECT_EQ(graph->num_nodes(), result_graph.num_nodes());
}
TEST_F(QuantizeTrainingTest, QuantizeGraphDef) {
Reset();
Graph* graph = g_.get();
Node* const_a = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* const_b = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
graph->AddControlEdge(graph->source_node(), const_a);
graph->AddControlEdge(graph->source_node(), const_b);
Node* relu = test::graph::Relu(graph, const_a);
Node* identity = test::graph::Identity(graph, const_b);
Node* matmul = test::graph::Matmul(graph, relu, identity, false, false);
graph->AddControlEdge(matmul, graph->sink_node());
int num_bits = 8;
GraphDef input_graphdef;
graph->ToGraphDef(&input_graphdef);
GraphDef result_graphdef;
TF_ASSERT_OK(DoQuantizeTrainingOnGraphDef(
input_graphdef, num_bits, "QuantizeAndDequantizeV2", &result_graphdef));
GraphConstructorOptions opts;
Graph result_graph(OpRegistry::Global());
TF_ASSERT_OK(ConvertGraphDefToGraph(opts, result_graphdef, &result_graph));
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "QuantizeAndDequantizeV2", graph));
EXPECT_EQ(graph->num_nodes(), result_graph.num_nodes());
}
TEST_F(QuantizeTrainingTest, FixedRangeAndEMARange_QuantizeAndDequantize) {
Reset();
Graph* g = g_.get();
Node* a;
TF_ASSERT_OK(Placeholder(g, "a", {2, 2}, &a));
Node* c = Constant<float>({2.0, 3.0, 4.0, 5.0}, {2, 2});
g->AddControlEdge(g->source_node(), a);
g->AddControlEdge(g->source_node(), c);
Node* relu = test::graph::Relu(g, a);
Node* relu6 = test::graph::Relu6(g, c);
Node* m1 = test::graph::Matmul(g, relu, relu6, false, false);
g->AddControlEdge(m1, g->sink_node());
const int num_bits = 8;
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "QuantizeAndDequantizeV2", g));
SessionOptions options;
Session* sess;
TF_ASSERT_OK(NewSession(options, &sess));
GraphDef gdef;
g->ToGraphDef(&gdef);
TF_ASSERT_OK(sess->Create(gdef));
string min_const_name = strings::StrCat(relu6->name(), "/InputMin");
string max_const_name = strings::StrCat(relu6->name(), "/InputMax");
std::vector<Tensor> outputs;
TF_ASSERT_OK(sess->Run({}, {min_const_name, max_const_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 6.0);
Tensor a1(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&a1, {0.0, 1.0, 2.0, 3.0});
Tensor a2(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&a2, {1.0, 2.0, 3.0, 4.0});
TF_ASSERT_OK(sess->Run({{"a", a1}}, {m1->name()}, {}, &outputs));
string min_var_name = strings::StrCat(relu->name(), "/Min/Variable");
string max_var_name = strings::StrCat(relu->name(), "/Max/Variable");
TF_ASSERT_OK(sess->Run({}, {min_var_name, max_var_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 3.0);
TF_ASSERT_OK(sess->Run({}, {min_const_name, max_const_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 6.0);
TF_ASSERT_OK(sess->Run({{"a", a2}}, {m1->name()}, {}, &outputs));
TF_ASSERT_OK(sess->Run({}, {min_var_name, max_var_name}, {}, &outputs));
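// After the second run the EMA variables follow
// new = old * decay + observed * (1 - decay), with decay = 0.999.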
const float decay = 0.999;
const float expected_min = 0.0 * decay + 1.0 * (1.0 - decay);
const float expected_max = 3.0 * decay + 4.0 * (1.0 - decay);
EXPECT_NEAR(outputs[0].flat<float>()(0), expected_min, 1e-4);
EXPECT_NEAR(outputs[1].flat<float>()(0), expected_max, 1e-4);
TF_ASSERT_OK(sess->Run({}, {min_const_name, max_const_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 6.0);
}
TEST_F(QuantizeTrainingTest, FixedRangeAndEMARange_FakeQuant) {
Reset();
Graph* g = g_.get();
Node* a;
TF_ASSERT_OK(Placeholder(g, "a", {2, 2}, &a));
Node* c = Constant<float>({2.0, 3.0, 4.0, 5.0}, {2, 2});
g->AddControlEdge(g->source_node(), a);
g->AddControlEdge(g->source_node(), c);
Node* relu = test::graph::Relu(g, a);
Node* relu6 = test::graph::Relu6(g, c);
Node* m1 = test::graph::Matmul(g, relu, relu6, false, false);
g->AddControlEdge(m1, g->sink_node());
const int num_bits = 8;
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "FakeQuantWithMinMaxVars", g));
SessionOptions options;
Session* sess;
TF_ASSERT_OK(NewSession(options, &sess));
GraphDef gdef;
g->ToGraphDef(&gdef);
TF_ASSERT_OK(sess->Create(gdef));
string min_const_name = strings::StrCat(relu6->name(), "/InputMin");
string max_const_name = strings::StrCat(relu6->name(), "/InputMax");
std::vector<Tensor> outputs;
TF_ASSERT_OK(sess->Run({}, {min_const_name, max_const_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 6.0);
Tensor a1(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&a1, {0.0, 1.0, 2.0, 3.0});
Tensor a2(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&a2, {1.0, 2.0, 3.0, 4.0});
TF_ASSERT_OK(sess->Run({{"a", a1}}, {m1->name()}, {}, &outputs));
string min_var_name = strings::StrCat(relu->name(), "/Min/Variable");
string max_var_name = strings::StrCat(relu->name(), "/Max/Variable");
TF_ASSERT_OK(sess->Run({}, {min_var_name, max_var_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 3.0);
TF_ASSERT_OK(sess->Run({}, {min_const_name, max_const_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 6.0);
TF_ASSERT_OK(sess->Run({{"a", a2}}, {m1->name()}, {}, &outputs));
TF_ASSERT_OK(sess->Run({}, {min_var_name, max_var_name}, {}, &outputs));
const float decay = 0.999;
const float expected_min = 0.0 * decay + 1.0 * (1.0 - decay);
const float expected_max = 3.0 * decay + 4.0 * (1.0 - decay);
EXPECT_NEAR(outputs[0].flat<float>()(0), expected_min, 1e-4);
EXPECT_NEAR(outputs[1].flat<float>()(0), expected_max, 1e-4);
TF_ASSERT_OK(sess->Run({}, {min_const_name, max_const_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 6.0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/quantize_training.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/quantize_training_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f63c0329-0154-4e2c-bcda-f6fe9aac6a03 | cpp | google/tensorstore | s3_endpoint | tensorstore/kvstore/s3/s3_endpoint.cc | tensorstore/kvstore/s3/s3_endpoint_test.cc | #include "tensorstore/kvstore/s3/s3_endpoint.h"
#include <cassert>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_format.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/s3/validate.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/str_cat.h"
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_http::HttpTransport;
namespace tensorstore {
namespace internal_kvstore_s3 {
namespace {
static constexpr char kAmzBucketRegionHeader[] = "x-amz-bucket-region";
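// S3 has two addressing styles: virtual-hosted
// (https://<bucket>.s3.<region>.amazonaws.com) and path-style
// (https://s3.<region>.amazonaws.com/<bucket>). Bucket names containing
// dots must use path-style, since they break wildcard TLS certificates
// under virtual hosting.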
struct S3VirtualHostFormatter {
std::string GetEndpoint(std::string_view bucket,
std::string_view aws_region) const {
return absl::StrFormat("https:
aws_region);
}
};
struct S3PathFormatter {
std::string GetEndpoint(std::string_view bucket,
std::string_view aws_region) const {
return absl::StrFormat("https:
bucket);
}
};
struct S3CustomFormatter {
std::string endpoint;
std::string GetEndpoint(std::string_view bucket,
std::string_view aws_region) const {
return absl::StrFormat("%s/%s", endpoint, bucket);
}
};
template <typename Formatter>
struct ResolveHost {
std::string bucket;
std::string default_aws_region;
Formatter formatter;
void operator()(Promise<S3EndpointRegion> promise,
ReadyFuture<HttpResponse> ready) {
if (!promise.result_needed()) return;
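// Resolution order: prefer the region S3 reports in the
// x-amz-bucket-region response header, then the caller-supplied
// default, then fail. Promise::SetResult only takes effect the first
// time, so the earlier branches win.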
auto& headers = ready.value().headers;
if (auto it = headers.find(kAmzBucketRegionHeader); it != headers.end()) {
promise.SetResult(S3EndpointRegion{
formatter.GetEndpoint(bucket, it->second),
it->second,
});
}
if (!default_aws_region.empty()) {
promise.SetResult(S3EndpointRegion{
formatter.GetEndpoint(bucket, default_aws_region),
default_aws_region,
});
}
promise.SetResult(absl::FailedPreconditionError(tensorstore::StrCat(
"Failed to resolve aws_region for bucket ", QuoteString(bucket))));
}
};
}
std::variant<absl::Status, S3EndpointRegion> ValidateEndpoint(
std::string_view bucket, std::string aws_region, std::string_view endpoint,
std::string host_header) {
ABSL_CHECK(!bucket.empty());
if (!host_header.empty() && endpoint.empty()) {
return absl::InvalidArgumentError(
"\"host_header\" cannot be set without also setting \"endpoint\"");
}
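// "Old-style" bucket names (uppercase letters or underscores, legal
// only under the legacy us-east-1 naming rules) can exist only in
// us-east-1, so the region is pinned without any lookup.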
if (internal_kvstore_s3::ClassifyBucketName(bucket) ==
internal_kvstore_s3::BucketNameType::kOldUSEast1) {
if (!aws_region.empty() && aws_region != "us-east-1") {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Bucket ", QuoteString(bucket),
" requires aws_region \"us-east-1\", not ", QuoteString(aws_region)));
}
aws_region = "us-east-1";
}
if (endpoint.empty()) {
if (!aws_region.empty()) {
if (!absl::StrContains(bucket, ".")) {
S3VirtualHostFormatter formatter;
return S3EndpointRegion{
formatter.GetEndpoint(bucket, aws_region),
aws_region,
};
}
S3PathFormatter formatter;
return S3EndpointRegion{
formatter.GetEndpoint(bucket, aws_region),
aws_region,
};
}
return absl::OkStatus();
}
auto parsed = internal::ParseGenericUri(endpoint);
if (parsed.scheme != "http" && parsed.scheme != "https") {
return absl::InvalidArgumentError(
tensorstore::StrCat("Endpoint ", endpoint, " has invalid scheme ",
parsed.scheme, ". Should be http(s)."));
}
if (!parsed.query.empty()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Query in endpoint unsupported ", endpoint));
}
if (!parsed.fragment.empty()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Fragment in endpoint unsupported ", endpoint));
}
if (!aws_region.empty()) {
S3CustomFormatter formatter{std::string(endpoint)};
return S3EndpointRegion{
formatter.GetEndpoint(bucket, aws_region),
aws_region,
};
}
return absl::OkStatus();
}
Future<S3EndpointRegion> ResolveEndpointRegion(
std::string bucket, std::string_view endpoint, std::string host_header,
std::shared_ptr<internal_http::HttpTransport> transport) {
assert(!bucket.empty());
assert(transport);
assert(IsValidBucketName(bucket));
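// No endpoint override: probe AWS with a HEAD request and let
// ResolveHost read the bucket's region from the x-amz-bucket-region
// header, which AWS includes even on redirect responses.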
if (endpoint.empty()) {
if (!absl::StrContains(bucket, ".")) {
std::string url = absl::StrFormat("https:
return PromiseFuturePair<S3EndpointRegion>::Link(
ResolveHost<S3VirtualHostFormatter>{
std::move(bucket), {}, S3VirtualHostFormatter{}},
transport->IssueRequest(
HttpRequestBuilder("HEAD", std::move(url))
.AddHostHeader(host_header)
.BuildRequest(),
{}))
.future;
}
std::string url =
absl::StrFormat("https://s3.amazonaws.com/%s", bucket);
return PromiseFuturePair<S3EndpointRegion>::Link(
ResolveHost<S3PathFormatter>{
std::move(bucket), {}, S3PathFormatter{}},
transport->IssueRequest(
HttpRequestBuilder("HEAD", std ::move(url))
.AddHostHeader(host_header)
.BuildRequest(),
{}))
.future;
}
std::string url = absl::StrFormat("%s/%s", endpoint, bucket);
return PromiseFuturePair<S3EndpointRegion>::Link(
ResolveHost<S3CustomFormatter>{
std::move(bucket), "us-east-1",
S3CustomFormatter{std::string(endpoint)}},
transport->IssueRequest(HttpRequestBuilder("HEAD", std::move(url))
.AddHostHeader(host_header)
.BuildRequest(),
{}))
.future;
}
}
} | #include "tensorstore/kvstore/s3/s3_endpoint.h"
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/mock_http_transport.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::internal_http::DefaultMockHttpTransport;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_kvstore_s3::ResolveEndpointRegion;
using ::tensorstore::internal_kvstore_s3::S3EndpointRegion;
using ::tensorstore::internal_kvstore_s3::ValidateEndpoint;
namespace {
TEST(ValidateEndpointTest, Basic) {
EXPECT_THAT(ValidateEndpoint("testbucket", {}, {}, {}),
::testing::VariantWith<absl::Status>(absl::OkStatus()));
EXPECT_THAT(ValidateEndpoint("test.bucket", {}, {}, {}),
::testing::VariantWith<absl::Status>(absl::OkStatus()));
EXPECT_THAT(ValidateEndpoint("testbucket", "us-east-1", {}, {}),
::testing::VariantWith<S3EndpointRegion>(testing::_));
EXPECT_THAT(ValidateEndpoint("OldBucket", "us-east-1", {}, {}),
::testing::VariantWith<S3EndpointRegion>(testing::_));
EXPECT_THAT(ValidateEndpoint("OldBucket", {}, {}, {}),
::testing::VariantWith<S3EndpointRegion>(testing::_));
EXPECT_THAT(ValidateEndpoint("OldBucket", "us-west-1", {}, {}),
::testing::VariantWith<absl::Status>(
tensorstore::StatusIs(absl::StatusCode::kInvalidArgument)));
EXPECT_THAT(ValidateEndpoint("testbucket", "region", "http:
::testing::VariantWith<S3EndpointRegion>(
S3EndpointRegion{"http:
EXPECT_THAT(
ValidateEndpoint("testbucket", "region", "http:
::testing::VariantWith<S3EndpointRegion>(
S3EndpointRegion{"http:
EXPECT_THAT(ValidateEndpoint("testbucket", {}, {}, "my.header"),
::testing::VariantWith<absl::Status>(
tensorstore::StatusIs(absl::StatusCode::kInvalidArgument)));
}
TEST(ResolveEndpointRegion, Basic) {
absl::flat_hash_map<std::string, HttpResponse> url_to_response{
{"HEAD https:
HttpResponse{200, absl::Cord(), {{"x-amz-bucket-region", "us-east-1"}}}},
{"HEAD https:
HttpResponse{200, absl::Cord(), {{"x-amz-bucket-region", "us-east-1"}}}},
{"HEAD http:
HttpResponse{200, absl::Cord(), {{"x-amz-bucket-region", "us-east-1"}}}},
};
auto mock_transport =
std::make_shared<DefaultMockHttpTransport>(url_to_response);
S3EndpointRegion ehr;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
ehr,
ResolveEndpointRegion("testbucket", {}, {}, mock_transport).result());
EXPECT_THAT(ehr.endpoint, "https:
EXPECT_THAT(ehr.aws_region, "us-east-1");
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
ehr,
ResolveEndpointRegion("test.bucket", {}, {}, mock_transport).result());
EXPECT_THAT(ehr.endpoint, "https:
EXPECT_THAT(ehr.aws_region, "us-east-1");
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
ehr, ResolveEndpointRegion("test.bucket", "http:
mock_transport)
.result());
EXPECT_THAT(ehr.endpoint, "http:
EXPECT_THAT(ehr.aws_region, "us-east-1");
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
ehr, ResolveEndpointRegion("test.bucket", "http:
"s3.localhost.com", mock_transport)
.result());
EXPECT_THAT(ehr.endpoint, "http:
EXPECT_THAT(ehr.aws_region, "us-east-1");
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/s3_endpoint.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/s3_endpoint_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
b7497017-5529-44a5-b7ab-7a0f9672552b | cpp | tensorflow/tensorflow | bfloat16_conversion_folding | third_party/xla/xla/service/bfloat16_conversion_folding.cc | third_party/xla/xla/service/bfloat16_conversion_folding_test.cc | #include "xla/service/bfloat16_conversion_folding.h"
#include <cstdint>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/float_support.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
class BFloat16ConversionFoldingVisitor : public DfsHloVisitorWithDefault {
public:
explicit BFloat16ConversionFoldingVisitor(
HloComputation* computation, const FloatSupport* bfloat16_support,
BFloat16ConversionFolding* bfloat16_conversion_folding)
: computation_(computation),
bfloat16_support_(bfloat16_support),
bfloat16_conversion_folding_(bfloat16_conversion_folding) {}
absl::Status DefaultAction(HloInstruction* hlo) override;
absl::Status HandleAllReduce(HloInstruction* crs) override;
static bool Run(HloComputation* computation,
const FloatSupport* bfloat16_support,
BFloat16ConversionFolding* bfloat16_conversion_folding) {
BFloat16ConversionFoldingVisitor visitor(computation, bfloat16_support,
bfloat16_conversion_folding);
TF_CHECK_OK(computation->Accept(&visitor));
return visitor.changed_;
}
private:
absl::Status TryFoldBF16Conversions(HloInstruction* hlo);
absl::Status FoldOutputConversions(HloInstruction* hlo);
absl::Status FoldOperandConversion(HloInstruction* hlo,
int64_t operand_index);
HloComputation* computation_;
const FloatSupport* bfloat16_support_;
BFloat16ConversionFolding* bfloat16_conversion_folding_;
bool changed_ = false;
};
absl::Status BFloat16ConversionFoldingVisitor::FoldOutputConversions(
HloInstruction* hlo) {
std::vector<HloInstruction*> materialized_users = hlo->users();
hlo->mutable_shape()->set_element_type(BF16);
bfloat16_conversion_folding_->UpdateLayout(hlo->mutable_shape());
for (auto user : materialized_users) {
CHECK_EQ(user->opcode(), HloOpcode::kConvert);
TF_RETURN_IF_ERROR(user->ReplaceAllUsesWith(hlo));
changed_ = true;
}
return absl::OkStatus();
}
absl::Status BFloat16ConversionFoldingVisitor::FoldOperandConversion(
HloInstruction* hlo, int64_t operand_index) {
auto operand = hlo->mutable_operand(operand_index);
CHECK_EQ(operand->opcode(), HloOpcode::kConvert);
TF_RETURN_IF_ERROR(
hlo->ReplaceOperandWith(operand_index, operand->mutable_operand(0)));
changed_ = true;
return absl::OkStatus();
}
namespace {
bool AllUsersAreF32ToBF16Converts(const HloInstruction* hlo) {
if (hlo->user_count() == 0 || hlo->shape().element_type() != F32) {
return false;
}
for (const auto user : hlo->users()) {
if (user->opcode() == HloOpcode::kConvert &&
user->shape().element_type() == BF16) {
continue;
}
return false;
}
return true;
}
}
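// Folds converts surrounding `hlo` when the backend supports BF16:
// operand converts (BF16 -> F32) are bypassed, and if every user is an
// F32 -> BF16 convert, the output type is rewritten to BF16 and those
// converts dropped. Without mixed-precision support, the instruction
// must end up entirely BF16 or nothing is folded.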
absl::Status BFloat16ConversionFoldingVisitor::TryFoldBF16Conversions(
HloInstruction* hlo) {
std::vector<int64_t> bf16_to_f32_operands;
bool has_other_f32_operands = false;
for (int64_t i = 0; i < hlo->operands().size(); ++i) {
auto operand = hlo->operand(i);
if (operand->shape().element_type() == F32) {
if (operand->opcode() == HloOpcode::kConvert &&
operand->operand(0)->shape().element_type() == BF16 &&
bfloat16_support_->SupportsLowPrecisionOperand(*hlo, i)) {
bf16_to_f32_operands.push_back(i);
} else {
has_other_f32_operands = true;
}
continue;
}
}
const bool fold_output_conversion =
AllUsersAreF32ToBF16Converts(hlo) &&
bfloat16_support_->SupportsLowPrecisionOutput(*hlo);
if (!bfloat16_support_->SupportsMixedPrecisions(*hlo)) {
if (has_other_f32_operands ||
(!fold_output_conversion && hlo->shape().element_type() == F32)) {
return absl::OkStatus();
}
}
if (fold_output_conversion) {
TF_RETURN_IF_ERROR(FoldOutputConversions(hlo));
}
for (int64_t i : bf16_to_f32_operands) {
TF_RETURN_IF_ERROR(FoldOperandConversion(hlo, i));
}
return absl::OkStatus();
}
absl::Status BFloat16ConversionFoldingVisitor::DefaultAction(
HloInstruction* hlo) {
if (hlo->opcode() == HloOpcode::kTuple ||
hlo->opcode() == HloOpcode::kGetTupleElement ||
hlo->opcode() == HloOpcode::kConstant ||
hlo->opcode() == HloOpcode::kParameter ||
hlo->opcode() == HloOpcode::kFusion ||
hlo->opcode() == HloOpcode::kBitcastConvert ||
hlo->opcode() == HloOpcode::kConvert ||
hlo->opcode() == HloOpcode::kCall ||
hlo->opcode() == HloOpcode::kCustomCall ||
hlo->opcode() == HloOpcode::kWhile ||
hlo->opcode() == HloOpcode::kConditional ||
hlo->opcode() == HloOpcode::kAsyncStart ||
hlo->opcode() == HloOpcode::kAsyncDone ||
HloDataflowAnalysis::IsInPlaceOperation(hlo->opcode()) ||
hlo->HasSideEffectNoRecurse()) {
return absl::OkStatus();
}
if (hlo == computation_->root_instruction() &&
!bfloat16_support_->SupportsMixedPrecisions(*hlo)) {
return absl::OkStatus();
}
return TryFoldBF16Conversions(hlo);
}
absl::Status BFloat16ConversionFoldingVisitor::HandleAllReduce(
HloInstruction* crs) {
if (crs->HasSideEffectNoRecurse()) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(DefaultAction(crs));
if (!bfloat16_support_->SupportsMixedPrecisions(*crs)) {
return absl::OkStatus();
}
if (!crs->shape().IsTuple()) {
return absl::OkStatus();
}
if (crs == computation_->root_instruction()) {
return absl::OkStatus();
}
std::vector<std::vector<HloInstruction*>> per_tuple_element_gtes(
crs->operand_count());
for (auto user : crs->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
return absl::OkStatus();
}
per_tuple_element_gtes[user->tuple_index()].push_back(user);
}
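// Fold tuple elements independently: element i becomes BF16 only when
// every get-tuple-element reading it feeds nothing but F32 -> BF16
// converts, which all-reduce's mixed-precision support makes legal on
// a per-element basis.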
for (int64_t i = 0; i < crs->operand_count(); ++i) {
auto all_gte_users_are_bf16_convert = [&per_tuple_element_gtes, i]() {
if (per_tuple_element_gtes[i].empty()) {
return false;
}
for (auto gte : per_tuple_element_gtes[i]) {
if (!AllUsersAreF32ToBF16Converts(gte)) {
return false;
}
}
return true;
};
if (!all_gte_users_are_bf16_convert()) {
continue;
}
ShapeUtil::GetMutableSubshape(crs->mutable_shape(), {i})
->set_element_type(BF16);
bfloat16_conversion_folding_->UpdateLayout(
ShapeUtil::GetMutableSubshape(crs->mutable_shape(), {i}));
for (auto gte : per_tuple_element_gtes[i]) {
TF_RETURN_IF_ERROR(FoldOutputConversions(gte));
}
}
return absl::OkStatus();
}
absl::StatusOr<bool> BFloat16ConversionFolding::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
2, "BFloat16ConversionFolding::Run(), before:\n" + module->ToString());
bool changed = false;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
if (BFloat16ConversionFoldingVisitor::Run(comp, bfloat16_support_, this)) {
changed = true;
}
}
XLA_VLOG_LINES(
2, "BFloat16ConversionFolding::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/bfloat16_conversion_folding.h"
#include <cstdint>
#include <optional>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/float_support.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
class TestBFloat16Support : public FloatSupport {
public:
TestBFloat16Support() : FloatSupport(BF16) {}
~TestBFloat16Support() override {}
bool SupportsLowPrecisionOperand(const HloInstruction& hlo,
int64_t operand_index) const override {
if (hlo.opcode() == HloOpcode::kAdd ||
hlo.opcode() == HloOpcode::kSubtract ||
hlo.opcode() == HloOpcode::kTuple ||
hlo.opcode() == HloOpcode::kGetTupleElement ||
hlo.opcode() == HloOpcode::kAllReduce) {
return true;
}
return false;
}
bool SupportsLowPrecisionOutput(const HloInstruction& hlo) const override {
if (hlo.opcode() == HloOpcode::kAdd ||
hlo.opcode() == HloOpcode::kSubtract ||
hlo.opcode() == HloOpcode::kTuple ||
hlo.opcode() == HloOpcode::kGetTupleElement ||
hlo.opcode() == HloOpcode::kAllReduce) {
return true;
}
return false;
}
bool SupportsMixedPrecisions(const HloInstruction& hlo) const override {
if (hlo.opcode() == HloOpcode::kAdd || hlo.opcode() == HloOpcode::kTuple ||
hlo.opcode() == HloOpcode::kGetTupleElement ||
hlo.opcode() == HloOpcode::kAllReduce) {
return true;
}
return false;
}
};
class BFloat16ConversionFoldingTest : public HloTestBase {
protected:
BFloat16ConversionFoldingTest()
: HloTestBase(/*verifier_layout_sensitive=*/false,
/*allow_mixed_precision_in_hlo_verifier=*/true) {}
bool FoldConversions(HloModule* module) {
TestBFloat16Support bfloat16_support_;
BFloat16ConversionFolding fold(&bfloat16_support_);
absl::StatusOr<bool> result = fold.Run(module);
EXPECT_IS_OK(result.status());
return result.value();
}
};
TEST_F(BFloat16ConversionFoldingTest, FoldIfSupported) {
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32_shape, "b"));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32_shape, "c"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_shape, HloOpcode::kAdd, a, b));
HloInstruction* convert0 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, add0));
HloInstruction* convert1 = builder.AddInstruction(
HloInstruction::CreateConvert(f32_shape, convert0));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_shape, HloOpcode::kAdd, convert1, c));
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, add1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(FoldConversions(module.get()));
EXPECT_EQ(computation->root_instruction(), add1);
EXPECT_EQ(add0->shape().element_type(), BF16);
EXPECT_EQ(add1->shape().element_type(), BF16);
EXPECT_EQ(add1->operand(0), add0);
}
TEST_F(BFloat16ConversionFoldingTest, DoNotFoldIfUnsupported) {
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32_shape, "b"));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32_shape, "c"));
HloInstruction* mul0 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_shape, HloOpcode::kMultiply, a, b));
HloInstruction* convert0 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, mul0));
HloInstruction* convert1 = builder.AddInstruction(
HloInstruction::CreateConvert(f32_shape, convert0));
HloInstruction* mul1 = builder.AddInstruction(HloInstruction::CreateBinary(
f32_shape, HloOpcode::kMultiply, convert1, c));
HloInstruction* convert2 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, mul1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(FoldConversions(module.get()));
EXPECT_EQ(computation->root_instruction(), convert2);
EXPECT_EQ(mul0->shape().element_type(), F32);
EXPECT_EQ(mul1->shape().element_type(), F32);
EXPECT_EQ(mul1->operand(0), convert1);
}
TEST_F(BFloat16ConversionFoldingTest, DoNotFoldUnsupportedMixedPrecision) {
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32_shape, "b"));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32_shape, "c"));
HloInstruction* sub0 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_shape, HloOpcode::kSubtract, a, b));
HloInstruction* convert0 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, sub0));
HloInstruction* convert1 = builder.AddInstruction(
HloInstruction::CreateConvert(f32_shape, convert0));
HloInstruction* sub1 = builder.AddInstruction(HloInstruction::CreateBinary(
f32_shape, HloOpcode::kSubtract, convert1, c));
HloInstruction* convert2 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, sub1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(FoldConversions(module.get()));
EXPECT_EQ(computation->root_instruction(), convert2);
EXPECT_EQ(sub0->shape().element_type(), F32);
EXPECT_EQ(sub1->shape().element_type(), F32);
EXPECT_EQ(sub1->operand(0), convert1);
}
TEST_F(BFloat16ConversionFoldingTest, DoNotFoldTuple) {
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape, "b"));
HloInstruction* convert0 =
builder.AddInstruction(HloInstruction::CreateConvert(f32_shape, b));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({a, convert0}));
HloInstruction* gte = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32_shape, tuple, 0));
HloInstruction* convert1 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, gte));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(FoldConversions(module.get()));
EXPECT_EQ(computation->root_instruction(), convert1);
EXPECT_EQ(gte->shape().element_type(), F32);
EXPECT_EQ(tuple->operand(1), convert0);
}
TEST_F(BFloat16ConversionFoldingTest, DoNotFoldAsyncOp) {
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
auto module = CreateNewVerifiedModule();
auto async_computation_builder = HloComputation::Builder("async_computation");
HloInstruction* async_a = async_computation_builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "async_a"));
HloInstruction* async_b = async_computation_builder.AddInstruction(
HloInstruction::CreateParameter(1, f32_shape, "async_b"));
HloInstruction* add =
async_computation_builder.AddInstruction(HloInstruction::CreateBinary(
f32_shape, HloOpcode::kAdd, async_a, async_b));
HloComputation* async_computation =
module->AddEmbeddedComputation(async_computation_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "a"));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, bf16_shape, "b"));
HloInstruction* convert0 =
builder.AddInstruction(HloInstruction::CreateConvert(f32_shape, b));
HloInstruction* async_start =
builder.AddInstruction(HloInstruction::CreateAsyncStart(
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({f32_shape, f32_shape}), f32_shape,
ShapeUtil::MakeScalarShape(U32)}),
{a, convert0}, async_computation));
HloInstruction* async_done = builder.AddInstruction(
HloInstruction::CreateAsyncDone(f32_shape, async_start));
HloInstruction* convert1 = builder.AddInstruction(
HloInstruction::CreateConvert(bf16_shape, async_done));
HloComputation* computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(FoldConversions(module.get()));
EXPECT_EQ(async_computation->root_instruction(), add);
EXPECT_EQ(computation->root_instruction(), convert1);
EXPECT_EQ(async_done->shape().element_type(), F32);
EXPECT_EQ(async_start->operand(1), convert0);
}
TEST_F(BFloat16ConversionFoldingTest, FoldAllReduceTupleOutput) {
auto builder = HloComputation::Builder(TestName());
auto module = CreateNewVerifiedModule();
HloComputation::Builder sum_builder("add");
auto x = sum_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "x"));
auto y = sum_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "y"));
sum_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd, x, y));
HloComputation* sum = module->AddEmbeddedComputation(sum_builder.Build());
Shape f32_shape = ShapeUtil::MakeShape(F32, {2, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {2, 4});
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateParameter(0, bf16_shape, "a"));
HloInstruction* convert_a =
builder.AddInstruction(HloInstruction::CreateConvert(f32_shape, a));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32_shape, "b"));
HloInstruction* crs = builder.AddInstruction(HloInstruction::CreateAllReduce(
ShapeUtil::MakeTupleShape({f32_shape, f32_shape}), {convert_a, b}, sum,
CollectiveDeviceList(), /*constrain_layout=*/false,
/*channel_id=*/std::nullopt, /*use_global_device_ids=*/false));
HloInstruction* gte_a = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32_shape, crs, 0));
HloInstruction* gte_b = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32_shape, crs, 1));
HloInstruction* convert_gte_b =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, gte_b));
HloInstruction* tuple = builder.AddInstruction(
HloInstruction::CreateTuple({gte_a, convert_gte_b}));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(FoldConversions(module.get()));
EXPECT_EQ(computation->root_instruction(), tuple);
EXPECT_EQ(tuple->operand(0), gte_a);
EXPECT_EQ(tuple->operand(1), gte_b);
EXPECT_EQ(gte_a->shape().element_type(), F32);
EXPECT_EQ(gte_b->shape().element_type(), BF16);
EXPECT_EQ(crs->operand(0), a);
EXPECT_EQ(crs->operand(1), b);
EXPECT_EQ(a->shape().element_type(), BF16);
EXPECT_EQ(b->shape().element_type(), F32);
EXPECT_EQ(ShapeUtil::GetSubshape(crs->shape(), {0}).element_type(), F32);
EXPECT_EQ(ShapeUtil::GetSubshape(crs->shape(), {1}).element_type(), BF16);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/bfloat16_conversion_folding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/bfloat16_conversion_folding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c45b2d03-f0b3-41f2-a12e-10706399826b | cpp | tensorflow/tensorflow | async_value_tensor | tensorflow/core/tfrt/common/async_value_tensor.cc | tensorflow/core/tfrt/common/async_value_tensor_test.cc | #include "tensorflow/core/tfrt/common/async_value_tensor.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/log/check.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/tensor.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/support/forward_decls.h"
namespace tensorflow {
namespace {
constexpr uintptr_t kTag = 0x1ULL;
}
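// An AsyncValueTensor hides behind an ordinary tensorflow::Tensor: the
// tensor's "data" pointer is really a pointer to this object with the
// low bit (kTag) set. Real AsyncValueTensor allocations are at least
// 2-byte aligned, so FromOpaquePointer can tell tagged wrappers apart
// from ordinary dense buffers (for which it returns nullptr).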
AsyncValueTensor* AsyncValueTensor::FromTensor(
const Tensor* tensor) {
AsyncValueTensor* av_tensor =
FromOpaquePointer(const_cast<char*>(tensor->tensor_data().data()));
return av_tensor;
}
const tfrt::RCReference<tfrt::AsyncValue>& AsyncValueTensor::GetAsyncRef() {
return av_ref_;
}
void AsyncValueTensor::SetAsyncRef(tfrt::RCReference<tfrt::AsyncValue> av_ref) {
av_ref_ = std::move(av_ref);
}
std::shared_ptr<xla::PjRtBuffer> AsyncValueTensor::GetBuffer() {
return buffer_;
}
void AsyncValueTensor::SetBuffer(std::shared_ptr<xla::PjRtBuffer> buffer) {
buffer_ = std::move(buffer);
}
AsyncValueTensor* AsyncValueTensor::FromOpaquePointer(void* ptr) {
uintptr_t value = reinterpret_cast<uintptr_t>(ptr);
if (value & kTag) {
return reinterpret_cast<AsyncValueTensor*>(value & ~kTag);
} else {
return nullptr;
}
}
void* AsyncValueTensor::ToOpaquePointer(AsyncValueTensor* tensor) {
uintptr_t value = reinterpret_cast<uintptr_t>(tensor);
CHECK_EQ(value & kTag, 0);
value |= kTag;
return reinterpret_cast<AsyncValueTensor*>(value);
}
void* AsyncValueAllocator::AllocateRaw(size_t alignment, size_t num_bytes) {
return AsyncValueTensor::ToOpaquePointer(new AsyncValueTensor);
}
void AsyncValueAllocator::DeallocateRaw(void* ptr) {
delete AsyncValueTensor::FromOpaquePointer(ptr);
}
} | #include "tensorflow/core/tfrt/common/async_value_tensor.h"
#include <cstdint>
#include <memory>
#include <gtest/gtest.h>
#include "xla/pjrt/pjrt_client.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
namespace tensorflow {
namespace {
TEST(AsyncValueTensorTest, InvalidTensor) {
tensorflow::Tensor tensor(tensorflow::DT_INT64, tensorflow::TensorShape({1}));
AsyncValueTensor* avt = AsyncValueTensor::FromTensor(&tensor);
ASSERT_EQ(avt, nullptr);
}
TEST(AsyncValueTensorTest, SetAndGetAsyncValue) {
AsyncValueAllocator allocator;
tensorflow::Tensor tensor(&allocator, tensorflow::DT_INT64,
tensorflow::TensorShape({1}));
AsyncValueTensor* avt = AsyncValueTensor::FromTensor(&tensor);
ASSERT_NE(avt, nullptr);
tsl::AsyncValueRef<int32_t> value =
tsl::MakeConstructedAsyncValueRef<int32_t>(123);
avt->SetAsyncRef(value.CopyRCRef());
auto ret_value = avt->GetAsyncRef();
ASSERT_EQ(ret_value, value.CopyRCRef());
}
TEST(AsyncValueTensorTest, SetAndGetBuffer) {
AsyncValueAllocator allocator;
tensorflow::Tensor tensor(&allocator, tensorflow::DT_INT64,
tensorflow::TensorShape({1}));
AsyncValueTensor* avt = AsyncValueTensor::FromTensor(&tensor);
ASSERT_NE(avt, nullptr);
std::shared_ptr<xla::PjRtBuffer> buffer;
avt->SetBuffer(buffer);
auto ret_buffer = avt->GetBuffer();
ASSERT_EQ(ret_buffer, buffer);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/async_value_tensor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/async_value_tensor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
728c532d-716d-43c9-8ea2-fe54b64e4765 | cpp | tensorflow/tensorflow | buffered_inputstream | third_party/xla/xla/tsl/lib/io/buffered_inputstream.cc | third_party/xla/xla/tsl/lib/io/buffered_inputstream_test.cc | #include "xla/tsl/lib/io/buffered_inputstream.h"
#include "absl/status/status.h"
#include "xla/tsl/lib/io/random_inputstream.h"
namespace tsl {
namespace io {
BufferedInputStream::BufferedInputStream(InputStreamInterface* input_stream,
size_t buffer_bytes,
bool owns_input_stream)
: input_stream_(input_stream),
size_(buffer_bytes),
owns_input_stream_(owns_input_stream) {
buf_.reserve(size_);
}
BufferedInputStream::BufferedInputStream(RandomAccessFile* file,
size_t buffer_bytes)
: BufferedInputStream(new RandomAccessInputStream(file), buffer_bytes,
/*owns_input_stream=*/true) {}
BufferedInputStream::~BufferedInputStream() {
if (owns_input_stream_) {
delete input_stream_;
}
}
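// Refills buf_ from the wrapped stream. A terminal error such as the
// OutOfRange returned at EOF is remembered in file_status_ so that
// subsequent reads fail fast without touching the underlying stream
// again.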
absl::Status BufferedInputStream::FillBuffer() {
if (!file_status_.ok()) {
pos_ = 0;
limit_ = 0;
return file_status_;
}
absl::Status s = input_stream_->ReadNBytes(size_, &buf_);
pos_ = 0;
limit_ = buf_.size();
if (!s.ok()) {
file_status_ = s;
}
return s;
}
template <typename StringType>
absl::Status BufferedInputStream::ReadLineHelper(StringType* result,
bool include_eol) {
result->clear();
absl::Status s;
size_t start_pos = pos_;
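// Scan for '\n', appending completed chunks whenever the buffer
// refills. Each '\r' is excluded by restarting the pending chunk just
// past it, so CRLF line endings read the same as plain LF.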
while (true) {
if (pos_ == limit_) {
result->append(buf_.data() + start_pos, pos_ - start_pos);
s = FillBuffer();
if (limit_ == 0) {
break;
}
start_pos = pos_;
}
char c = buf_[pos_];
if (c == '\n') {
result->append(buf_.data() + start_pos, pos_ - start_pos);
if (include_eol) {
result->append(1, c);
}
pos_++;
return absl::OkStatus();
}
if (c == '\r') {
result->append(buf_.data() + start_pos, pos_ - start_pos);
start_pos = pos_ + 1;
}
pos_++;
}
if (absl::IsOutOfRange(s) && !result->empty()) {
return absl::OkStatus();
}
return s;
}
absl::Status BufferedInputStream::ReadNBytes(int64_t bytes_to_read,
tstring* result) {
if (bytes_to_read < 0) {
return errors::InvalidArgument("Can't read a negative number of bytes: ",
bytes_to_read);
}
result->clear();
if (pos_ == limit_ && !file_status_.ok() && bytes_to_read > 0) {
return file_status_;
}
result->reserve(bytes_to_read);
absl::Status s;
while (result->size() < static_cast<size_t>(bytes_to_read)) {
if (pos_ == limit_) {
s = FillBuffer();
if (limit_ == 0) {
DCHECK(!s.ok());
file_status_ = s;
break;
}
}
const int64_t bytes_to_copy =
std::min<int64_t>(limit_ - pos_, bytes_to_read - result->size());
result->insert(result->size(), buf_, pos_, bytes_to_copy);
pos_ += bytes_to_copy;
}
if (absl::IsOutOfRange(s) &&
(result->size() == static_cast<size_t>(bytes_to_read))) {
return absl::OkStatus();
}
return s;
}
absl::Status BufferedInputStream::SkipNBytes(int64_t bytes_to_skip) {
if (bytes_to_skip < 0) {
return errors::InvalidArgument("Can only skip forward, not ",
bytes_to_skip);
}
if (pos_ + bytes_to_skip < limit_) {
pos_ += bytes_to_skip;
} else {
absl::Status s = input_stream_->SkipNBytes(bytes_to_skip - (limit_ - pos_));
pos_ = 0;
limit_ = 0;
if (absl::IsOutOfRange(s)) {
file_status_ = s;
}
return s;
}
return absl::OkStatus();
}
int64_t BufferedInputStream::Tell() const {
return input_stream_->Tell() - (limit_ - pos_);
}
absl::Status BufferedInputStream::Seek(int64_t position) {
if (position < 0) {
return errors::InvalidArgument("Seeking to a negative position: ",
position);
}
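// Three cases: the target precedes the buffered window (reset and skip
// from the start), falls inside the already-consumed part of the
// buffer (move pos_ backwards), or lies ahead (skip forward).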
const int64_t buf_lower_limit = input_stream_->Tell() - limit_;
if (position < buf_lower_limit) {
TF_RETURN_IF_ERROR(Reset());
return SkipNBytes(position);
}
if (position < Tell()) {
pos_ -= Tell() - position;
return absl::OkStatus();
}
return SkipNBytes(position - Tell());
}
template <typename T>
absl::Status BufferedInputStream::ReadAll(T* result) {
result->clear();
absl::Status status;
while (status.ok()) {
status = FillBuffer();
if (limit_ == 0) {
break;
}
result->append(buf_);
pos_ = limit_;
}
if (absl::IsOutOfRange(status)) {
file_status_ = status;
return absl::OkStatus();
}
return status;
}
template Status BufferedInputStream::ReadAll<std::string>(std::string* result);
template Status BufferedInputStream::ReadAll<tstring>(tstring* result);
absl::Status BufferedInputStream::Reset() {
TF_RETURN_IF_ERROR(input_stream_->Reset());
pos_ = 0;
limit_ = 0;
file_status_ = absl::OkStatus();
return absl::OkStatus();
}
absl::Status BufferedInputStream::ReadLine(std::string* result) {
return ReadLineHelper(result, false);
}
absl::Status BufferedInputStream::ReadLine(tstring* result) {
return ReadLineHelper(result, false);
}
std::string BufferedInputStream::ReadLineAsString() {
std::string result;
ReadLineHelper(&result, true).IgnoreError();
return result;
}
absl::Status BufferedInputStream::SkipLine() {
absl::Status s;
bool skipped = false;
while (true) {
if (pos_ == limit_) {
s = FillBuffer();
if (limit_ == 0) {
break;
}
}
char c = buf_[pos_++];
skipped = true;
if (c == '\n') {
return absl::OkStatus();
}
}
if (absl::IsOutOfRange(s) && skipped) {
return absl::OkStatus();
}
return s;
}
}
} | #include "xla/tsl/lib/io/buffered_inputstream.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/lib/io/random_inputstream.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
namespace io {
namespace {
static std::vector<int> BufferSizes() {
return {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 65536};
}
class ReadOnceInputStream : public InputStreamInterface {
public:
ReadOnceInputStream() : start_(true) {}
absl::Status ReadNBytes(int64_t bytes_to_read, tstring* result) override {
if (bytes_to_read < 11) {
return errors::InvalidArgument("Not reading all bytes: ", bytes_to_read);
}
if (start_) {
*result = "0123456789";
start_ = false;
return errors::OutOfRange("Out of range.");
}
return errors::InvalidArgument(
"Redudant call to ReadNBytes after an OutOfRange error.");
}
int64_t Tell() const override { return start_ ? 0 : 10; }
absl::Status Reset() override {
start_ = true;
return absl::OkStatus();
}
private:
bool start_;
};
TEST(BufferedInputStream, ReadLine_Empty) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, ""));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
BufferedInputStream in(input_stream.get(), buf_size);
string line;
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
}
}
TEST(BufferedInputStream, ReadLine1) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(
WriteStringToFile(env, fname, "line one\nline two\nline three\n"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
BufferedInputStream in(input_stream.get(), buf_size);
string line;
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line one");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line three");
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
}
}
TEST(BufferedInputStream, ReadLine_NoTrailingNewLine) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "line one\nline two\nline three"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
BufferedInputStream in(input_stream.get(), buf_size);
string line;
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line one");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line three");
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
}
}
TEST(BufferedInputStream, ReadLine_EmptyLines) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(
WriteStringToFile(env, fname, "line one\n\n\nline two\nline three"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
BufferedInputStream in(input_stream.get(), buf_size);
string line;
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line one");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line three");
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
}
}
TEST(BufferedInputStream, ReadLine_CRLF) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname,
"line one\r\n\r\n\r\nline two\r\nline three"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
BufferedInputStream in(input_stream.get(), buf_size);
string line;
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line one");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line three");
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
}
}
TEST(BufferedInputStream, SkipLine1) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(
WriteStringToFile(env, fname, "line one\nline two\nline three\n"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
BufferedInputStream in(input_stream.get(), buf_size);
string line;
TF_ASSERT_OK(in.SkipLine());
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
TF_ASSERT_OK(in.SkipLine());
EXPECT_TRUE(errors::IsOutOfRange(in.SkipLine()));
EXPECT_TRUE(errors::IsOutOfRange(in.SkipLine()));
}
}
TEST(BufferedInputStream, SkipLine_NoTrailingNewLine) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "line one\nline two\nline three"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
BufferedInputStream in(input_stream.get(), buf_size);
string line;
TF_ASSERT_OK(in.SkipLine());
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
TF_ASSERT_OK(in.SkipLine());
EXPECT_TRUE(errors::IsOutOfRange(in.SkipLine()));
EXPECT_TRUE(errors::IsOutOfRange(in.SkipLine()));
}
}
TEST(BufferedInputStream, SkipLine_EmptyLines) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "line one\n\n\nline two"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
BufferedInputStream in(input_stream.get(), buf_size);
string line;
TF_ASSERT_OK(in.SkipLine());
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "");
TF_ASSERT_OK(in.SkipLine());
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
}
}
TEST(BufferedInputStream, ReadNBytes) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
tstring read;
BufferedInputStream in(input_stream.get(), buf_size);
EXPECT_EQ(0, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "012");
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "3456");
EXPECT_EQ(7, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(7, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
EXPECT_EQ(read, "789");
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
}
}
TEST(BufferedInputStream, OutOfRangeCache) {
for (auto buf_size : BufferSizes()) {
if (buf_size < 11) {
continue;
}
ReadOnceInputStream input_stream;
tstring read;
BufferedInputStream in(&input_stream, buf_size);
EXPECT_EQ(0, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "012");
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK((in.ReadNBytes(7, &read)));
EXPECT_EQ(read, "3456789");
EXPECT_EQ(10, in.Tell());
absl::Status s = in.ReadNBytes(5, &read);
EXPECT_EQ(error::OUT_OF_RANGE, s.code()) << s;
EXPECT_EQ(read, "");
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
}
}
TEST(BufferedInputStream, SkipNBytes) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
tstring read;
BufferedInputStream in(input_stream.get(), buf_size);
EXPECT_EQ(0, in.Tell());
TF_ASSERT_OK(in.SkipNBytes(3));
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.SkipNBytes(0));
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(2, &read));
EXPECT_EQ(read, "34");
EXPECT_EQ(5, in.Tell());
TF_ASSERT_OK(in.SkipNBytes(0));
EXPECT_EQ(5, in.Tell());
TF_ASSERT_OK(in.SkipNBytes(2));
EXPECT_EQ(7, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(1, &read));
EXPECT_EQ(read, "7");
EXPECT_EQ(8, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.SkipNBytes(5)));
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.SkipNBytes(5)));
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
}
}
TEST(BufferedInputStream, ReadNBytesRandomAccessFile) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
tstring read;
BufferedInputStream in(file.get(), buf_size);
EXPECT_EQ(0, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "012");
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "3456");
EXPECT_EQ(7, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(7, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
EXPECT_EQ(read, "789");
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
}
}
TEST(BufferedInputStream, SkipNBytesRandomAccessFile) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
tstring read;
BufferedInputStream in(file.get(), buf_size);
EXPECT_EQ(0, in.Tell());
TF_ASSERT_OK(in.SkipNBytes(3));
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.SkipNBytes(0));
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(2, &read));
EXPECT_EQ(read, "34");
EXPECT_EQ(5, in.Tell());
TF_ASSERT_OK(in.SkipNBytes(0));
EXPECT_EQ(5, in.Tell());
TF_ASSERT_OK(in.SkipNBytes(2));
EXPECT_EQ(7, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(1, &read));
EXPECT_EQ(read, "7");
EXPECT_EQ(8, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.SkipNBytes(5)));
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.SkipNBytes(5)));
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
}
}
TEST(BufferedInputStream, Seek) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
tstring read;
BufferedInputStream in(input_stream.get(), buf_size);
TF_ASSERT_OK(in.Seek(3));
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "3456");
EXPECT_EQ(7, in.Tell());
TF_ASSERT_OK(in.Seek(1));
TF_ASSERT_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "1234");
EXPECT_EQ(5, in.Tell());
}
}
TEST(BufferedInputStream, Seek_NotReset) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
tstring read;
BufferedInputStream in(input_stream.get(), 3);
TF_ASSERT_OK(in.ReadNBytes(4, &read));
int before_tell = input_stream.get()->Tell();
EXPECT_EQ(before_tell, 6);
TF_ASSERT_OK(in.Seek(3));
int after_tell = input_stream.get()->Tell();
EXPECT_EQ(before_tell, after_tell);
}
TEST(BufferedInputStream, ReadAll_Empty) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
const string expected = "";
TF_ASSERT_OK(WriteStringToFile(env, fname, expected));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
RandomAccessInputStream input_stream(file.get());
BufferedInputStream in(&input_stream, buf_size);
string contents;
TF_ASSERT_OK(in.ReadAll(&contents));
EXPECT_EQ(expected, contents);
}
}
TEST(BufferedInputStream, ReadAll_Text) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
const string expected = "line one\nline two\nline three";
TF_ASSERT_OK(WriteStringToFile(env, fname, expected));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
RandomAccessInputStream input_stream(file.get());
BufferedInputStream in(&input_stream, buf_size);
string contents;
TF_ASSERT_OK(in.ReadAll(&contents));
EXPECT_EQ(expected, contents);
}
}
void BM_BufferedReaderSmallReads(::testing::benchmark::State& state) {
const int buff_size = state.range(0);
const int file_size = state.range(1);
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
const string file_elem = "0123456789";
std::unique_ptr<WritableFile> write_file;
TF_ASSERT_OK(env->NewWritableFile(fname, &write_file));
for (int i = 0; i < file_size; ++i) {
TF_ASSERT_OK(write_file->Append(file_elem));
}
TF_ASSERT_OK(write_file->Close());
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
tstring result;
int itr = 0;
for (auto s : state) {
BufferedInputStream in(file.get(), buff_size);
for (int64_t i = 0; i < 10 * file_size; ++i) {
TF_ASSERT_OK(in.ReadNBytes(1, &result))
<< "i: " << i << " itr: " << itr << " buff_size: " << buff_size
<< " file size: " << file_size;
}
++itr;
}
}
BENCHMARK(BM_BufferedReaderSmallReads)
->ArgPair(1, 5)
->ArgPair(1, 1024)
->ArgPair(10, 5)
->ArgPair(10, 1024)
->ArgPair(1024, 1024)
->ArgPair(1024 * 1024, 1024)
->ArgPair(1024 * 1024, 1024 * 1024)
->ArgPair(256 * 1024 * 1024, 1024);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/buffered_inputstream.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/buffered_inputstream_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
eae41410-587c-4d36-bdfa-6cbf3503e709 | cpp | tensorflow/tensorflow | scheduling_instruction_annotator | third_party/xla/xla/service/gpu/transforms/scheduling_instruction_annotator.cc | third_party/xla/xla/service/gpu/transforms/scheduling_instruction_annotator_test.cc | #include "xla/service/gpu/transforms/scheduling_instruction_annotator.h"
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
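// Sets each instruction's scheduling_name metadata to the instruction's
// current name. Instructions that already carry a scheduling name, and
// constants, are left untouched. Returns true iff anything changed.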
absl::StatusOr<bool> AnnotateSchedulingInstructionNames(
HloComputation& computation) {
bool changed = false;
for (HloInstruction* inst : computation.instructions()) {
if (!inst->metadata().scheduling_name().empty()) {
continue;
}
if (inst->opcode() == HloOpcode::kConstant) {
continue;
}
inst->set_metadata_scheduling_name(inst->name());
changed = true;
}
return changed;
}
}
absl::StatusOr<bool> SchedulingInstructionAnnotator::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
CHECK(module->has_schedule())
<< "The pass is supposed to run in the beginning of post-scheduling!";
bool changed = false;
for (HloComputation* computation :
module->MakeComputationPostOrder(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool result,
AnnotateSchedulingInstructionNames(*computation));
changed |= result;
}
return changed;
}
} | #include "xla/service/gpu/transforms/scheduling_instruction_annotator.h"
#include <memory>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
using SchedulingInstructionAnnotatorTest = HloTestBase;
TEST_F(SchedulingInstructionAnnotatorTest,
AnnotatesAllInstructionsWithTheirRespectiveNames) {
constexpr absl::string_view kHloString = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[1] parameter(0)
p1 = f32[1] parameter(1)
add0 = f32[1] add(p0,p1)
ROOT exp0 = f32[1] exponential(add0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
SchedulingInstructionAnnotator pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
ASSERT_TRUE(changed);
for (const auto* comp : module->computations()) {
for (const auto* instruction : comp->instructions()) {
EXPECT_EQ(instruction->name(), instruction->metadata().scheduling_name());
}
}
constexpr absl::string_view kExpected = R"(
)";
TF_ASSERT_OK_AND_ASSIGN(
bool filecheck_matches,
RunFileCheck(
module->ToString(HloPrintOptions().set_print_operand_shape(false)),
kExpected));
EXPECT_TRUE(filecheck_matches);
}
TEST_F(SchedulingInstructionAnnotatorTest, SkipsAnnotatingConstants) {
constexpr absl::string_view kHloString = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[1] parameter(0)
c1 = f32[1] constant(42)
ROOT add0 = f32[1] add(p0, c1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
SchedulingInstructionAnnotator pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
ASSERT_TRUE(changed);
constexpr absl::string_view kExpected = R"(
)";
TF_ASSERT_OK_AND_ASSIGN(
bool filecheck_matches,
RunFileCheck(
module->ToString(HloPrintOptions().set_print_operand_shape(false)),
kExpected));
EXPECT_TRUE(filecheck_matches);
}
TEST_F(SchedulingInstructionAnnotatorTest,
DoesNotAnnotateAllInstructionsWithTheirRespectiveNames) {
constexpr absl::string_view kHloString = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[1] parameter(0), metadata={scheduling_name="p0"}
p1 = f32[1] parameter(1), metadata={scheduling_name="p1"}
add0 = f32[1] add(p0,p1), metadata={scheduling_name="add0"}
ROOT exp0 = f32[1] exponential(add0), metadata={scheduling_name="exp0"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
SchedulingInstructionAnnotator pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
EXPECT_FALSE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/scheduling_instruction_annotator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/scheduling_instruction_annotator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b6fd03f0-c227-443f-88a6-f4d89d5469bf | cpp | tensorflow/tensorflow | tensor_shape_utils | tensorflow/c/kernels/tensor_shape_utils.cc | tensorflow/c/kernels/tensor_shape_utils_test.cc | #include "tensorflow/c/kernels/tensor_shape_utils.h"
#include <string>
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/strcat.h"
namespace tensorflow {
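// Renders the tensor's shape as "[d0,d1,...]", matching the format of
// TensorShape::DebugString(). CHECK-fails on a negative rank or dimension.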
std::string ShapeDebugString(TF_Tensor* tensor) {
CHECK_GE(TF_NumDims(tensor), 0);
tensorflow::string s = "[";
for (int i = 0; i < TF_NumDims(tensor); ++i) {
if (i > 0) tensorflow::strings::StrAppend(&s, ",");
int64_t dim = TF_Dim(tensor, i);
CHECK_GE(dim, 0);
tensorflow::strings::StrAppend(&s, dim);
}
tensorflow::strings::StrAppend(&s, "]");
return s;
}
} | #include "tensorflow/c/kernels/tensor_shape_utils.h"
#include "tensorflow/c/tf_tensor_internal.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
struct TF_TensorWrapper {
TF_Tensor* tf_tensor;
explicit TF_TensorWrapper(TF_Tensor* tensor) { tf_tensor = tensor; }
~TF_TensorWrapper() { TF_DeleteTensor(tf_tensor); }
};
void TestShapeMatch(TensorShape shape) {
Tensor tensor(DT_FLOAT, shape);
Status status;
TF_Tensor* tf_tensor = TF_TensorFromTensor(tensor, &status);
TF_TensorWrapper tensor_wrapper = TF_TensorWrapper(tf_tensor);
ASSERT_TRUE(status.ok()) << status.ToString();
ASSERT_EQ(tensor.shape().DebugString(), ShapeDebugString(tf_tensor));
}
TEST(ShapeDebugString, RegularShape) { TestShapeMatch(TensorShape({5, 4, 7})); }
TEST(ShapeDebugString, ScalarShape) { TestShapeMatch(TensorShape({})); }
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/kernels/tensor_shape_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/kernels/tensor_shape_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
25cb9de2-96c2-423a-906a-e16a2739e1c9 | cpp | tensorflow/tensorflow | ram_file_block_cache | tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache.cc | tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache_test.cc | #include "tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache.h"
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <memory>
#include <sstream>
#include <utility>
#include "absl/synchronization/mutex.h"
#include "tensorflow/c/experimental/filesystem/plugins/gcs/cleanup.h"
namespace tf_gcs_filesystem {
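// A block is "not stale" if it is still being fetched, if staleness checking
// is disabled (max_staleness_ == 0), or if it finished downloading within the
// last max_staleness_ seconds.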
bool RamFileBlockCache::BlockNotStale(const std::shared_ptr<Block>& block) {
absl::MutexLock l(&block->mu);
if (block->state != FetchState::FINISHED) {
return true;
}
if (max_staleness_ == 0) return true;
return timer_seconds_() - block->timestamp <= max_staleness_;
}
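// Returns the cached block for `key`, first evicting every block of the file
// if the existing entry has gone stale. On a miss, an empty block is created
// and placed at the front of both the LRU and LRA lists.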
std::shared_ptr<RamFileBlockCache::Block> RamFileBlockCache::Lookup(
const Key& key) {
absl::MutexLock lock(&mu_);
auto entry = block_map_.find(key);
if (entry != block_map_.end()) {
if (BlockNotStale(entry->second)) {
return entry->second;
} else {
RemoveFile_Locked(key.first);
}
}
auto new_entry = std::make_shared<Block>();
lru_list_.push_front(key);
lra_list_.push_front(key);
new_entry->lru_iterator = lru_list_.begin();
new_entry->lra_iterator = lra_list_.begin();
new_entry->timestamp = timer_seconds_();
block_map_.emplace(std::make_pair(key, new_entry));
return new_entry;
}
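// Evicts least-recently-used blocks until the cache size drops to max_bytes_.
// Requires mu_ to be held.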
void RamFileBlockCache::Trim() {
while (!lru_list_.empty() && cache_size_ > max_bytes_) {
RemoveBlock(block_map_.find(lru_list_.back()));
}
}
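// Moves `key` to the front of the LRU list and trims the cache. A block
// shorter than block_size_ must be the last block of its file; finding a
// later block for the same file means the cache contents are inconsistent.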
void RamFileBlockCache::UpdateLRU(const Key& key,
const std::shared_ptr<Block>& block,
TF_Status* status) {
absl::MutexLock lock(&mu_);
if (block->timestamp == 0) {
return TF_SetStatus(status, TF_OK, "");
}
if (block->lru_iterator != lru_list_.begin()) {
lru_list_.erase(block->lru_iterator);
lru_list_.push_front(key);
block->lru_iterator = lru_list_.begin();
}
if (block->data.size() < block_size_) {
Key fmax = std::make_pair(key.first, std::numeric_limits<size_t>::max());
auto fcmp = block_map_.upper_bound(fmax);
if (fcmp != block_map_.begin() && key < (--fcmp)->first) {
return TF_SetStatus(status, TF_INTERNAL,
"Block cache contents are inconsistent.");
}
}
Trim();
return TF_SetStatus(status, TF_OK, "");
}
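// Makes sure the block's contents are populated, downloading them via
// block_fetcher_ if necessary. Block state moves CREATED/ERROR -> FETCHING ->
// FINISHED (or back to ERROR); concurrent readers of a block in the FETCHING
// state wait on its condition variable instead of issuing a second fetch.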
void RamFileBlockCache::MaybeFetch(const Key& key,
const std::shared_ptr<Block>& block,
TF_Status* status) {
bool downloaded_block = false;
auto reconcile_state = MakeCleanup([this, &downloaded_block, &key, &block] {
if (downloaded_block) {
absl::MutexLock l(&mu_);
if (block->timestamp != 0) {
cache_size_ += block->data.capacity();
lra_list_.erase(block->lra_iterator);
lra_list_.push_front(key);
block->lra_iterator = lra_list_.begin();
block->timestamp = timer_seconds_();
}
}
});
absl::MutexLock l(&block->mu);
TF_SetStatus(status, TF_OK, "");
while (true) {
switch (block->state) {
case FetchState::ERROR:
case FetchState::CREATED:
block->state = FetchState::FETCHING;
block->mu.Unlock();
block->data.clear();
block->data.resize(block_size_, 0);
int64_t bytes_transferred;
bytes_transferred = block_fetcher_(key.first, key.second, block_size_,
block->data.data(), status);
block->mu.Lock();
if (TF_GetCode(status) == TF_OK) {
block->data.resize(bytes_transferred, 0);
std::vector<char>(block->data).swap(block->data);
downloaded_block = true;
block->state = FetchState::FINISHED;
} else {
block->state = FetchState::ERROR;
}
block->cond_var.SignalAll();
return;
case FetchState::FETCHING:
block->cond_var.WaitWithTimeout(&block->mu, absl::Minutes(1));
if (block->state == FetchState::FINISHED) {
return TF_SetStatus(status, TF_OK, "");
}
break;
case FetchState::FINISHED:
return TF_SetStatus(status, TF_OK, "");
}
}
return TF_SetStatus(
status, TF_INTERNAL,
"Control flow should never reach the end of RamFileBlockCache::Fetch.");
}
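// Reads `n` bytes at `offset` through the cache. The cache is bypassed
// entirely when caching is disabled or the request is larger than the whole
// cache; otherwise the covered block-aligned range is fetched block by block
// and the overlapping bytes are copied into `buffer`. A short block marks
// end-of-file.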
int64_t RamFileBlockCache::Read(const std::string& filename, size_t offset,
size_t n, char* buffer, TF_Status* status) {
if (n == 0) {
TF_SetStatus(status, TF_OK, "");
return 0;
}
if (!IsCacheEnabled() || (n > max_bytes_)) {
return block_fetcher_(filename, offset, n, buffer, status);
}
size_t start = block_size_ * (offset / block_size_);
size_t finish = block_size_ * ((offset + n) / block_size_);
if (finish < offset + n) {
finish += block_size_;
}
size_t total_bytes_transferred = 0;
for (size_t pos = start; pos < finish; pos += block_size_) {
Key key = std::make_pair(filename, pos);
std::shared_ptr<Block> block = Lookup(key);
if (!block) {
std::cerr << "No block for key " << key.first << "@" << key.second;
abort();
}
MaybeFetch(key, block, status);
if (TF_GetCode(status) != TF_OK) return -1;
UpdateLRU(key, block, status);
if (TF_GetCode(status) != TF_OK) return -1;
const auto& data = block->data;
if (offset >= pos + data.size()) {
std::stringstream os;
os << "EOF at offset " << offset << " in file " << filename
<< " at position " << pos << " with data size " << data.size();
TF_SetStatus(status, TF_OUT_OF_RANGE, std::move(os).str().c_str());
return total_bytes_transferred;
}
auto begin = data.begin();
if (offset > pos) {
begin += offset - pos;
}
auto end = data.end();
if (pos + data.size() > offset + n) {
end -= (pos + data.size()) - (offset + n);
}
if (begin < end) {
size_t bytes_to_copy = end - begin;
memcpy(&buffer[total_bytes_transferred], &*begin, bytes_to_copy);
total_bytes_transferred += bytes_to_copy;
}
if (data.size() < block_size_) {
break;
}
}
TF_SetStatus(status, TF_OK, "");
return total_bytes_transferred;
}
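// Returns true if `file_signature` matches the recorded signature (or the
// file has not been seen before). On a mismatch, the file's cached blocks are
// dropped and the new signature is recorded.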
bool RamFileBlockCache::ValidateAndUpdateFileSignature(
const std::string& filename, int64_t file_signature) {
absl::MutexLock lock(&mu_);
auto it = file_signature_map_.find(filename);
if (it != file_signature_map_.end()) {
if (it->second == file_signature) {
return true;
}
RemoveFile_Locked(filename);
it->second = file_signature;
return false;
}
file_signature_map_[filename] = file_signature;
return true;
}
size_t RamFileBlockCache::CacheSize() const {
absl::MutexLock lock(&mu_);
return cache_size_;
}
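// Background loop, woken roughly once per second, that evicts every block of
// any file whose least-recently-accessed block is older than max_staleness_.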
void RamFileBlockCache::Prune() {
while (!stop_pruning_thread_.WaitForNotificationWithTimeout(
absl::Microseconds(1000000))) {
absl::MutexLock lock(&mu_);
uint64_t now = timer_seconds_();
while (!lra_list_.empty()) {
auto it = block_map_.find(lra_list_.back());
if (now - it->second->timestamp <= max_staleness_) {
break;
}
RemoveFile_Locked(std::string(it->first.first));
}
}
}
void RamFileBlockCache::Flush() {
absl::MutexLock lock(&mu_);
block_map_.clear();
lru_list_.clear();
lra_list_.clear();
cache_size_ = 0;
}
void RamFileBlockCache::RemoveFile(const std::string& filename) {
absl::MutexLock lock(&mu_);
RemoveFile_Locked(filename);
}
void RamFileBlockCache::RemoveFile_Locked(const std::string& filename) {
Key begin = std::make_pair(filename, 0);
auto it = block_map_.lower_bound(begin);
while (it != block_map_.end() && it->first.first == filename) {
auto next = std::next(it);
RemoveBlock(it);
it = next;
}
}
void RamFileBlockCache::RemoveBlock(BlockMap::iterator entry) {
entry->second->timestamp = 0;
lru_list_.erase(entry->second->lru_iterator);
lra_list_.erase(entry->second->lra_iterator);
cache_size_ -= entry->second->data.capacity();
block_map_.erase(entry);
}
} | #include "tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache.h"
#include <cstring>
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_internal.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/cloud/now_seconds_env.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
Status ReadCache(tf_gcs_filesystem::RamFileBlockCache* cache,
const string& filename, size_t offset, size_t n,
std::vector<char>* out) {
out->clear();
out->resize(n, 0);
TF_Status status;
auto bytes_transferred =
cache->Read(filename, offset, n, out->data(), &status);
if (bytes_transferred >= 0) {
EXPECT_LE(bytes_transferred, n);
out->resize(bytes_transferred, n);
}
return status.status;
}
TEST(RamFileBlockCacheTest, IsCacheEnabled) {
auto fetcher = [](const string& filename, size_t offset, size_t n,
char* buffer, TF_Status* status) -> int64_t {
TF_SetStatus(status, TF_OK, "");
return 0;
};
tf_gcs_filesystem::RamFileBlockCache cache1(0, 0, 0, fetcher);
tf_gcs_filesystem::RamFileBlockCache cache2(16, 0, 0, fetcher);
tf_gcs_filesystem::RamFileBlockCache cache3(0, 32, 0, fetcher);
tf_gcs_filesystem::RamFileBlockCache cache4(16, 32, 0, fetcher);
EXPECT_FALSE(cache1.IsCacheEnabled());
EXPECT_FALSE(cache2.IsCacheEnabled());
EXPECT_FALSE(cache3.IsCacheEnabled());
EXPECT_TRUE(cache4.IsCacheEnabled());
}
TEST(RamFileBlockCacheTest, ValidateAndUpdateFileSignature) {
int calls = 0;
auto fetcher = [&calls](const string& filename, size_t offset, size_t n,
char* buffer, TF_Status* status) -> int64_t {
calls++;
memset(buffer, 'x', n);
TF_SetStatus(status, TF_OK, "");
return n;
};
string filename = "file";
tf_gcs_filesystem::RamFileBlockCache cache(16, 32, 0, fetcher);
std::vector<char> out;
EXPECT_TRUE(cache.ValidateAndUpdateFileSignature(filename, 123));
TF_EXPECT_OK(ReadCache(&cache, filename, 0, 16, &out));
EXPECT_EQ(calls, 1);
EXPECT_TRUE(cache.ValidateAndUpdateFileSignature(filename, 123));
TF_EXPECT_OK(ReadCache(&cache, filename, 0, 16, &out));
EXPECT_EQ(calls, 1);
EXPECT_FALSE(cache.ValidateAndUpdateFileSignature(filename, 321));
TF_EXPECT_OK(ReadCache(&cache, filename, 0, 16, &out));
EXPECT_EQ(calls, 2);
}
TEST(RamFileBlockCacheTest, PassThrough) {
const string want_filename = "foo/bar";
const size_t want_offset = 42;
const size_t want_n = 1024;
int calls = 0;
auto fetcher = [&calls, want_filename, want_offset, want_n](
const string& got_filename, size_t got_offset,
size_t got_n, char* buffer, TF_Status* status) -> int64_t {
EXPECT_EQ(got_filename, want_filename);
EXPECT_EQ(got_offset, want_offset);
EXPECT_EQ(got_n, want_n);
calls++;
memset(buffer, 'x', got_n);
TF_SetStatus(status, TF_OK, "");
return got_n;
};
tf_gcs_filesystem::RamFileBlockCache cache1(1, 0, 0, fetcher);
tf_gcs_filesystem::RamFileBlockCache cache2(0, 1, 0, fetcher);
tf_gcs_filesystem::RamFileBlockCache cache3(0, 0, 0, fetcher);
tf_gcs_filesystem::RamFileBlockCache cache4(1000, 1000, 0, fetcher);
std::vector<char> out;
TF_EXPECT_OK(ReadCache(&cache1, want_filename, want_offset, want_n, &out));
EXPECT_EQ(calls, 1);
TF_EXPECT_OK(ReadCache(&cache2, want_filename, want_offset, want_n, &out));
EXPECT_EQ(calls, 2);
TF_EXPECT_OK(ReadCache(&cache3, want_filename, want_offset, want_n, &out));
EXPECT_EQ(calls, 3);
TF_EXPECT_OK(ReadCache(&cache4, want_filename, want_offset, want_n, &out));
EXPECT_EQ(calls, 4);
}
TEST(RamFileBlockCacheTest, BlockAlignment) {
const size_t size = 256;
std::vector<char> buf;
buf.reserve(size);
for (int i = 0; i < size; i++) {
buf.push_back(i);
}
auto fetcher = [&buf](const string& filename, size_t offset, size_t n,
char* buffer, TF_Status* status) -> int64_t {
int64_t bytes_transferred;
if (offset < buf.size()) {
size_t bytes_to_copy = std::min<size_t>(buf.size() - offset, n);
memcpy(buffer, buf.data() + offset, bytes_to_copy);
bytes_transferred = bytes_to_copy;
} else {
bytes_transferred = 0;
}
TF_SetStatus(status, TF_OK, "");
return bytes_transferred;
};
for (size_t block_size = 2; block_size <= 4; block_size++) {
tf_gcs_filesystem::RamFileBlockCache cache(block_size, block_size, 0,
fetcher);
for (size_t offset = 0; offset < 10; offset++) {
for (size_t n = block_size - 2; n <= block_size + 2; n++) {
std::vector<char> got;
TF_EXPECT_OK(ReadCache(&cache, "", offset, n, &got));
if (offset + n <= size) {
EXPECT_EQ(got.size(), n) << "block size = " << block_size
<< ", offset = " << offset << ", n = " << n;
} else {
EXPECT_EQ(got.size(), size - offset)
<< "block size = " << block_size << ", offset = " << offset
<< ", n = " << n;
}
std::vector<char>::const_iterator begin = buf.begin() + offset;
std::vector<char>::const_iterator end =
offset + n > buf.size() ? buf.end() : begin + n;
std::vector<char> want(begin, end);
EXPECT_EQ(got, want) << "block size = " << block_size
<< ", offset = " << offset << ", n = " << n;
}
}
}
}
TEST(RamFileBlockCacheTest, CacheHits) {
const size_t block_size = 16;
std::set<size_t> calls;
auto fetcher = [&calls, block_size](const string& filename, size_t offset,
size_t n, char* buffer,
TF_Status* status) -> int64_t {
EXPECT_EQ(n, block_size);
EXPECT_EQ(offset % block_size, 0);
EXPECT_EQ(calls.find(offset), calls.end()) << "at offset " << offset;
calls.insert(offset);
memset(buffer, 'x', n);
TF_SetStatus(status, TF_OK, "");
return n;
};
const uint32 block_count = 256;
tf_gcs_filesystem::RamFileBlockCache cache(
block_size, block_count * block_size, 0, fetcher);
std::vector<char> out;
out.resize(block_count, 0);
for (int i = 0; i < 2; i++) {
for (int j = 0; j < block_count; j++) {
TF_EXPECT_OK(ReadCache(&cache, "", block_size * j, block_size, &out));
}
}
}
TEST(RamFileBlockCacheTest, OutOfRange) {
const size_t block_size = 16;
const size_t file_size = 24;
bool first_block = false;
bool second_block = false;
auto fetcher = [block_size, file_size, &first_block, &second_block](
const string& filename, size_t offset, size_t n,
char* buffer, TF_Status* status) -> int64_t {
EXPECT_EQ(n, block_size);
EXPECT_EQ(offset % block_size, 0);
size_t bytes_to_copy = 0;
if (offset == 0) {
memset(buffer, 'x', n);
bytes_to_copy = n;
first_block = true;
} else if (offset == block_size) {
bytes_to_copy = file_size - block_size;
memset(buffer, 'x', bytes_to_copy);
second_block = true;
}
TF_SetStatus(status, TF_OK, "");
return bytes_to_copy;
};
tf_gcs_filesystem::RamFileBlockCache cache(block_size, block_size, 0,
fetcher);
std::vector<char> out;
TF_EXPECT_OK(ReadCache(&cache, "", 0, block_size, &out));
EXPECT_TRUE(first_block);
EXPECT_EQ(out.size(), block_size);
Status status = ReadCache(&cache, "", file_size + 4, 4, &out);
EXPECT_EQ(status.code(), error::OUT_OF_RANGE);
EXPECT_TRUE(second_block);
second_block = false;
TF_EXPECT_OK(ReadCache(&cache, "", block_size, block_size, &out));
EXPECT_FALSE(second_block);
EXPECT_EQ(out.size(), file_size - block_size);
}
TEST(RamFileBlockCacheTest, Inconsistent) {
const size_t block_size = 16;
auto fetcher = [block_size](const string& filename, size_t offset, size_t n,
char* buffer, TF_Status* status) -> int64_t {
EXPECT_EQ(n, block_size);
EXPECT_EQ(offset % block_size, 0);
EXPECT_GE(n, 1);
memset(buffer, 'x', 1);
TF_SetStatus(status, TF_OK, "");
return 1;
};
tf_gcs_filesystem::RamFileBlockCache cache(block_size, 2 * block_size, 0,
fetcher);
std::vector<char> out;
TF_EXPECT_OK(ReadCache(&cache, "", block_size, block_size, &out));
EXPECT_EQ(out.size(), 1);
Status status = ReadCache(&cache, "", 0, block_size, &out);
EXPECT_EQ(status.code(), error::INTERNAL);
}
TEST(RamFileBlockCacheTest, LRU) {
const size_t block_size = 16;
std::list<size_t> calls;
auto fetcher = [&calls, block_size](const string& filename, size_t offset,
size_t n, char* buffer,
TF_Status* status) -> int64_t {
EXPECT_EQ(n, block_size);
EXPECT_FALSE(calls.empty()) << "at offset = " << offset;
if (!calls.empty()) {
EXPECT_EQ(offset, calls.front());
calls.pop_front();
}
memset(buffer, 'x', n);
TF_SetStatus(status, TF_OK, "");
return n;
};
const uint32 block_count = 2;
tf_gcs_filesystem::RamFileBlockCache cache(
block_size, block_count * block_size, 0, fetcher);
std::vector<char> out;
calls.push_back(0);
TF_EXPECT_OK(ReadCache(&cache, "", 0, 1, &out));
TF_EXPECT_OK(ReadCache(&cache, "", 0, 1, &out));
calls.push_back(block_size);
TF_EXPECT_OK(ReadCache(&cache, "", block_size, 1, &out));
TF_EXPECT_OK(ReadCache(&cache, "", block_size, 1, &out));
calls.push_back(2 * block_size);
TF_EXPECT_OK(ReadCache(&cache, "", 2 * block_size, 1, &out));
TF_EXPECT_OK(ReadCache(&cache, "", 2 * block_size, 1, &out));
calls.push_back(0);
TF_EXPECT_OK(ReadCache(&cache, "", 0, 1, &out));
TF_EXPECT_OK(ReadCache(&cache, "", 2 * block_size, 1, &out));
calls.push_back(block_size);
TF_EXPECT_OK(ReadCache(&cache, "", block_size, 1, &out));
calls.push_back(0);
TF_EXPECT_OK(ReadCache(&cache, "", 0, 1, &out));
}
TEST(RamFileBlockCacheTest, MaxStaleness) {
int calls = 0;
auto fetcher = [&calls](const string& filename, size_t offset, size_t n,
char* buffer, TF_Status* status) -> int64_t {
calls++;
memset(buffer, 'x', n);
TF_SetStatus(status, TF_OK, "");
return n;
};
std::vector<char> out;
std::unique_ptr<NowSecondsEnv> env(new NowSecondsEnv);
tf_gcs_filesystem::RamFileBlockCache cache1(
      8, 16, 2, fetcher,
[&env]() { return env->NowSeconds(); });
TF_EXPECT_OK(ReadCache(&cache1, "", 0, 1, &out));
EXPECT_EQ(calls, 1);
for (int i = 1; i <= 10; i++) {
env->SetNowSeconds(i + 1);
TF_EXPECT_OK(ReadCache(&cache1, "", 0, 1, &out));
EXPECT_EQ(calls, 1 + i / 3);
}
calls = 0;
env->SetNowSeconds(0);
tf_gcs_filesystem::RamFileBlockCache cache2(
      8, 16, 0, fetcher,
[&env]() { return env->NowSeconds(); });
TF_EXPECT_OK(ReadCache(&cache2, "", 0, 1, &out));
EXPECT_EQ(calls, 1);
env->SetNowSeconds(365 * 24 * 60 * 60);
TF_EXPECT_OK(ReadCache(&cache2, "", 0, 1, &out));
EXPECT_EQ(calls, 1);
}
TEST(RamFileBlockCacheTest, RemoveFile) {
int calls = 0;
auto fetcher = [&calls](const string& filename, size_t offset, size_t n,
char* buffer, TF_Status* status) -> int64_t {
calls++;
char c = (filename == "a") ? 'a' : (filename == "b") ? 'b' : 'x';
if (offset > 0) {
c = toupper(c);
}
memset(buffer, c, n);
TF_SetStatus(status, TF_OK, "");
return n;
};
const size_t n = 3;
tf_gcs_filesystem::RamFileBlockCache cache(8, 32, 0, fetcher);
std::vector<char> out;
std::vector<char> a(n, 'a');
std::vector<char> b(n, 'b');
std::vector<char> A(n, 'A');
std::vector<char> B(n, 'B');
TF_EXPECT_OK(ReadCache(&cache, "a", 0, n, &out));
EXPECT_EQ(out, a);
EXPECT_EQ(calls, 1);
TF_EXPECT_OK(ReadCache(&cache, "a", 8, n, &out));
EXPECT_EQ(out, A);
EXPECT_EQ(calls, 2);
TF_EXPECT_OK(ReadCache(&cache, "b", 0, n, &out));
EXPECT_EQ(out, b);
EXPECT_EQ(calls, 3);
TF_EXPECT_OK(ReadCache(&cache, "b", 8, n, &out));
EXPECT_EQ(out, B);
EXPECT_EQ(calls, 4);
TF_EXPECT_OK(ReadCache(&cache, "a", 0, n, &out));
EXPECT_EQ(out, a);
TF_EXPECT_OK(ReadCache(&cache, "a", 8, n, &out));
EXPECT_EQ(out, A);
TF_EXPECT_OK(ReadCache(&cache, "b", 0, n, &out));
EXPECT_EQ(out, b);
TF_EXPECT_OK(ReadCache(&cache, "b", 8, n, &out));
EXPECT_EQ(out, B);
EXPECT_EQ(calls, 4);
cache.RemoveFile("a");
TF_EXPECT_OK(ReadCache(&cache, "b", 0, n, &out));
EXPECT_EQ(out, b);
TF_EXPECT_OK(ReadCache(&cache, "b", 8, n, &out));
EXPECT_EQ(out, B);
EXPECT_EQ(calls, 4);
TF_EXPECT_OK(ReadCache(&cache, "a", 0, n, &out));
EXPECT_EQ(out, a);
EXPECT_EQ(calls, 5);
TF_EXPECT_OK(ReadCache(&cache, "a", 8, n, &out));
EXPECT_EQ(out, A);
EXPECT_EQ(calls, 6);
}
TEST(RamFileBlockCacheTest, Prune) {
int calls = 0;
auto fetcher = [&calls](const string& filename, size_t offset, size_t n,
char* buffer, TF_Status* status) -> int64_t {
calls++;
memset(buffer, 'x', n);
TF_SetStatus(status, TF_OK, "");
return n;
};
std::vector<char> out;
std::unique_ptr<NowSecondsEnv> env(new NowSecondsEnv);
uint64 now = Env::Default()->NowSeconds();
env->SetNowSeconds(now);
tf_gcs_filesystem::RamFileBlockCache cache(
      8, 32, 1, fetcher,
[&env]() { return env->NowSeconds(); });
TF_EXPECT_OK(ReadCache(&cache, "a", 0, 1, &out));
env->SetNowSeconds(now + 1);
TF_EXPECT_OK(ReadCache(&cache, "b", 0, 1, &out));
TF_EXPECT_OK(ReadCache(&cache, "a", 8, 1, &out));
EXPECT_EQ(cache.CacheSize(), 24);
EXPECT_EQ(calls, 3);
TF_EXPECT_OK(ReadCache(&cache, "a", 0, 1, &out));
TF_EXPECT_OK(ReadCache(&cache, "b", 0, 1, &out));
TF_EXPECT_OK(ReadCache(&cache, "a", 8, 1, &out));
EXPECT_EQ(calls, 3);
env->SetNowSeconds(now + 2);
uint64 start = Env::Default()->NowSeconds();
do {
Env::Default()->SleepForMicroseconds(100000);
} while (cache.CacheSize() == 24 && Env::Default()->NowSeconds() - start < 3);
EXPECT_EQ(cache.CacheSize(), 8);
TF_EXPECT_OK(ReadCache(&cache, "b", 0, 1, &out));
EXPECT_EQ(calls, 3);
env->SetNowSeconds(now + 3);
start = Env::Default()->NowSeconds();
do {
Env::Default()->SleepForMicroseconds(100000);
} while (cache.CacheSize() == 8 && Env::Default()->NowSeconds() - start < 3);
EXPECT_EQ(cache.CacheSize(), 0);
}
TEST(RamFileBlockCacheTest, ParallelReads) {
const int callers = 4;
BlockingCounter counter(callers);
auto fetcher = [&counter](const string& filename, size_t offset, size_t n,
char* buffer, TF_Status* status) -> int64_t {
counter.DecrementCount();
if (!counter.WaitFor(std::chrono::seconds(10))) {
TF_SetStatus(status, TF_FAILED_PRECONDITION,
"desired concurrency not reached");
return -1;
}
memset(buffer, 'x', n);
TF_SetStatus(status, TF_OK, "");
return n;
};
const int block_size = 8;
tf_gcs_filesystem::RamFileBlockCache cache(
block_size, 2 * callers * block_size, 0, fetcher);
std::vector<std::unique_ptr<Thread>> threads;
threads.reserve(callers);
for (int i = 0; i < callers; i++) {
threads.emplace_back(
Env::Default()->StartThread({}, "caller", [block_size, &cache, i]() {
std::vector<char> out;
TF_EXPECT_OK(
ReadCache(&cache, "a", i * block_size, block_size, &out));
std::vector<char> x(block_size, 'x');
EXPECT_EQ(out, x);
}));
}
}
TEST(RamFileBlockCacheTest, CoalesceConcurrentReads) {
const size_t block_size = 16;
int num_requests = 0;
Notification notification;
auto fetcher = [&num_requests, ¬ification, block_size](
const string& filename, size_t offset, size_t n,
char* buffer, TF_Status* status) -> int64_t {
EXPECT_EQ(n, block_size);
EXPECT_EQ(offset, 0);
num_requests++;
memset(buffer, 'x', n);
notification.Notify();
Env::Default()->SleepForMicroseconds(100000);
TF_SetStatus(status, TF_OK, "");
return n;
};
tf_gcs_filesystem::RamFileBlockCache cache(block_size, block_size, 0,
fetcher);
std::unique_ptr<Thread> concurrent(
Env::Default()->StartThread({}, "concurrent", [block_size, &cache] {
std::vector<char> out;
TF_EXPECT_OK(ReadCache(&cache, "", 0, block_size / 2, &out));
EXPECT_EQ(out.size(), block_size / 2);
}));
notification.WaitForNotification();
std::vector<char> out;
TF_EXPECT_OK(ReadCache(&cache, "", block_size / 2, block_size / 2, &out));
EXPECT_EQ(out.size(), block_size / 2);
EXPECT_EQ(1, num_requests);
}
TEST(RamFileBlockCacheTest, Flush) {
int calls = 0;
auto fetcher = [&calls](const string& filename, size_t offset, size_t n,
char* buffer, TF_Status* status) -> int64_t {
calls++;
memset(buffer, 'x', n);
TF_SetStatus(status, TF_OK, "");
return n;
};
tf_gcs_filesystem::RamFileBlockCache cache(16, 32, 0, fetcher);
std::vector<char> out;
TF_EXPECT_OK(ReadCache(&cache, "", 0, 16, &out));
TF_EXPECT_OK(ReadCache(&cache, "", 0, 16, &out));
EXPECT_EQ(calls, 1);
cache.Flush();
TF_EXPECT_OK(ReadCache(&cache, "", 0, 16, &out));
EXPECT_EQ(calls, 2);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
643b14da-f17b-43cf-9351-78a2030c525c | cpp | abseil/abseil-cpp | cord_rep_crc | absl/strings/internal/cord_rep_crc.cc | absl/strings/internal/cord_rep_crc_test.cc | #include "absl/strings/internal/cord_rep_crc.h"
#include <cassert>
#include <cstdint>
#include <utility>
#include "absl/base/config.h"
#include "absl/strings/internal/cord_internal.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
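// Wraps `child` in a CRC node carrying `state`. If `child` is itself an
// unshared CRC node it is updated in place; a shared CRC child is unwrapped
// first so the new node references the underlying rep directly.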
CordRepCrc* CordRepCrc::New(CordRep* child, crc_internal::CrcCordState state) {
if (child != nullptr && child->IsCrc()) {
if (child->refcount.IsOne()) {
child->crc()->crc_cord_state = std::move(state);
return child->crc();
}
CordRep* old = child;
child = old->crc()->child;
CordRep::Ref(child);
CordRep::Unref(old);
}
auto* new_cordrep = new CordRepCrc;
new_cordrep->length = child != nullptr ? child->length : 0;
new_cordrep->tag = cord_internal::CRC;
new_cordrep->child = child;
new_cordrep->crc_cord_state = std::move(state);
return new_cordrep;
}
void CordRepCrc::Destroy(CordRepCrc* node) {
if (node->child != nullptr) {
CordRep::Unref(node->child);
}
delete node;
}
}
ABSL_NAMESPACE_END
} | #include "absl/strings/internal/cord_rep_crc.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/crc/internal/crc_cord_state.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_test_util.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
namespace {
using ::absl::cordrep_testing::MakeFlat;
using ::testing::Eq;
using ::testing::IsNull;
using ::testing::Ne;
#if !defined(NDEBUG) && GTEST_HAS_DEATH_TEST
TEST(CordRepCrc, RemoveCrcWithNullptr) {
EXPECT_DEATH(RemoveCrcNode(nullptr), "");
}
#endif
absl::crc_internal::CrcCordState MakeCrcCordState(uint32_t crc) {
crc_internal::CrcCordState state;
state.mutable_rep()->prefix_crc.push_back(
crc_internal::CrcCordState::PrefixCrc(42, crc32c_t{crc}));
return state;
}
TEST(CordRepCrc, NewDestroy) {
CordRep* rep = cordrep_testing::MakeFlat("Hello world");
CordRepCrc* crc = CordRepCrc::New(rep, MakeCrcCordState(12345));
EXPECT_TRUE(crc->refcount.IsOne());
EXPECT_THAT(crc->child, Eq(rep));
EXPECT_THAT(crc->crc_cord_state.Checksum(), Eq(crc32c_t{12345u}));
EXPECT_TRUE(rep->refcount.IsOne());
CordRepCrc::Destroy(crc);
}
TEST(CordRepCrc, NewExistingCrcNotShared) {
CordRep* rep = cordrep_testing::MakeFlat("Hello world");
CordRepCrc* crc = CordRepCrc::New(rep, MakeCrcCordState(12345));
CordRepCrc* new_crc = CordRepCrc::New(crc, MakeCrcCordState(54321));
EXPECT_THAT(new_crc, Eq(crc));
EXPECT_TRUE(new_crc->refcount.IsOne());
EXPECT_THAT(new_crc->child, Eq(rep));
EXPECT_THAT(new_crc->crc_cord_state.Checksum(), Eq(crc32c_t{54321u}));
EXPECT_TRUE(rep->refcount.IsOne());
CordRepCrc::Destroy(new_crc);
}
TEST(CordRepCrc, NewExistingCrcShared) {
CordRep* rep = cordrep_testing::MakeFlat("Hello world");
CordRepCrc* crc = CordRepCrc::New(rep, MakeCrcCordState(12345));
CordRep::Ref(crc);
CordRepCrc* new_crc = CordRepCrc::New(crc, MakeCrcCordState(54321));
EXPECT_THAT(new_crc, Ne(crc));
EXPECT_TRUE(new_crc->refcount.IsOne());
EXPECT_TRUE(crc->refcount.IsOne());
EXPECT_FALSE(rep->refcount.IsOne());
EXPECT_THAT(crc->child, Eq(rep));
EXPECT_THAT(new_crc->child, Eq(rep));
EXPECT_THAT(crc->crc_cord_state.Checksum(), Eq(crc32c_t{12345u}));
EXPECT_THAT(new_crc->crc_cord_state.Checksum(), Eq(crc32c_t{54321u}));
CordRep::Unref(crc);
CordRep::Unref(new_crc);
}
TEST(CordRepCrc, NewEmpty) {
CordRepCrc* crc = CordRepCrc::New(nullptr, MakeCrcCordState(12345));
EXPECT_TRUE(crc->refcount.IsOne());
EXPECT_THAT(crc->child, IsNull());
EXPECT_THAT(crc->length, Eq(0u));
EXPECT_THAT(crc->crc_cord_state.Checksum(), Eq(crc32c_t{12345u}));
EXPECT_TRUE(crc->refcount.IsOne());
CordRepCrc::Destroy(crc);
}
TEST(CordRepCrc, RemoveCrcNotCrc) {
CordRep* rep = cordrep_testing::MakeFlat("Hello world");
CordRep* nocrc = RemoveCrcNode(rep);
EXPECT_THAT(nocrc, Eq(rep));
CordRep::Unref(nocrc);
}
TEST(CordRepCrc, RemoveCrcNotShared) {
CordRep* rep = cordrep_testing::MakeFlat("Hello world");
CordRepCrc* crc = CordRepCrc::New(rep, MakeCrcCordState(12345));
CordRep* nocrc = RemoveCrcNode(crc);
EXPECT_THAT(nocrc, Eq(rep));
EXPECT_TRUE(rep->refcount.IsOne());
CordRep::Unref(nocrc);
}
TEST(CordRepCrc, RemoveCrcShared) {
CordRep* rep = cordrep_testing::MakeFlat("Hello world");
CordRepCrc* crc = CordRepCrc::New(rep, MakeCrcCordState(12345));
CordRep::Ref(crc);
CordRep* nocrc = RemoveCrcNode(crc);
EXPECT_THAT(nocrc, Eq(rep));
EXPECT_FALSE(rep->refcount.IsOne());
CordRep::Unref(nocrc);
CordRep::Unref(crc);
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cord_rep_crc.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cord_rep_crc_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
a6c01e1e-ca85-4c99-9bc7-027086e9a935 | cpp | tensorflow/tensorflow | thunk | third_party/xla/xla/service/gpu/runtime/thunk.cc | third_party/xla/xla/backends/cpu/runtime/thunk_test.cc | #include "xla/service/gpu/runtime/thunk.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/executable_run_options.h"
#include "xla/ffi/execution_context.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/translate/mhlo_to_hlo/location_exporter.h"
#include "xla/service/global_device_id.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/gpu_executable_run_options.h"
#include "xla/service/gpu/runtime/nccl_api.h"
#include "xla/service/gpu/runtime/nccl_clique.h"
#include "xla/service/gpu/runtime/nccl_clique_key.h"
#include "xla/service/service_executable_run_options.h"
#include "xla/stream_executor/stream.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
Thunk::CollectiveCliques::CollectiveCliques(
NcclClique::AcquiredCliquesMap cliques_map)
: cliques_map_(std::move(cliques_map)) {}
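// Returns the communicator for `rank` in the clique identified by
// `clique_key`, or a NotFound/Internal error if the clique or the rank's
// communicator is absent.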
absl::StatusOr<NcclApi::NcclCommHandle> Thunk::CollectiveCliques::GetComm(
const NcclCliqueKey& clique_key, int32_t rank) const {
auto clique = cliques_map_.find(clique_key);
if (clique == cliques_map_.end()) {
return absl::NotFoundError(absl::StrCat("No clique found for clique key: ",
clique_key.ToString()));
}
auto communicator = (*clique->second)->comm(rank);
if (!communicator.has_value()) {
return absl::InternalError(absl::StrCat("Communicator for rank ", rank,
" not found in a NCCL clique ",
clique_key.ToString()));
}
return *communicator;
}
absl::StatusOr<bool> Thunk::CollectiveCliques::is_local_clique(
const NcclCliqueKey& clique_key) const {
auto clique = cliques_map_.find(clique_key);
if (clique == cliques_map_.end()) {
return absl::NotFoundError(absl::StrCat("No clique found for clique key: ",
clique_key.ToString()));
}
return (*clique->second)->IsLocal();
}
absl::StatusOr<size_t> Thunk::CollectiveCliques::num_communicators(
const NcclCliqueKey& clique_key) const {
auto clique = cliques_map_.find(clique_key);
if (clique == cliques_map_.end()) {
return absl::NotFoundError(absl::StrCat("No clique found for clique key: ",
clique_key.ToString()));
}
return (*clique->second)->num_communicators();
}
using GlobalDeviceIdMap = Thunk::CollectiveExecuteParams::GlobalDeviceIdMap;
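// Resolves a local device ordinal to its global device id, defaulting to the
// ordinal itself when no mapping is supplied.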
static absl::StatusOr<GlobalDeviceId> GetGlobalDeviceId(
const GlobalDeviceIdMap* device_id_map, int64_t local_device_ordinal) {
if (!device_id_map) return GlobalDeviceId(local_device_ordinal);
auto it = device_id_map->find(local_device_ordinal);
if (it == device_id_map->end())
return absl::NotFoundError(
absl::StrCat("No global device id found for local device ordinal: ",
local_device_ordinal));
return it->second;
}
absl::StatusOr<Thunk::CollectiveExecuteParams>
Thunk::CollectiveExecuteParams::Create(
const ServiceExecutableRunOptions& run_options,
absl::Span<se::Stream* const> async_streams, int64_t local_device_ordinal,
int64_t collective_max_nchannels, int64_t p2p_max_nchannels) {
const GpuExecutableRunOptions* gpu_options =
run_options.run_options().gpu_executable_run_options();
auto* device_id_map = gpu_options && gpu_options->gpu_global_device_ids()
? &*gpu_options->gpu_global_device_ids()
: nullptr;
auto* nccl_callback = gpu_options && gpu_options->nccl_clique_id_callback()
? &gpu_options->nccl_clique_id_callback()
: nullptr;
TF_ASSIGN_OR_RETURN(GlobalDeviceId global_device_id,
GetGlobalDeviceId(device_id_map, local_device_ordinal));
return CollectiveExecuteParams(
run_options.stream()->parent(), run_options.run_options().run_id(),
async_streams, local_device_ordinal, global_device_id,
run_options.run_options().device_assignment(), device_id_map,
nccl_callback, collective_max_nchannels, p2p_max_nchannels);
}
Thunk::CollectiveExecuteParams::CollectiveExecuteParams(
se::StreamExecutor* executor, RunId run_id,
absl::Span<se::Stream* const> async_streams, int64_t local_device_ordinal,
GlobalDeviceId global_device_id, const DeviceAssignment* device_assn,
const GlobalDeviceIdMap* global_device_id_map,
const NcclCliqueIdCallback* nccl_clique_id_callback,
int64_t collective_max_nchannels, int64_t p2p_max_nchannels)
: executor(executor),
run_id(run_id),
async_streams(async_streams.begin(), async_streams.end()),
local_device_ordinal(local_device_ordinal),
global_device_id(global_device_id),
device_assn(device_assn),
global_device_id_map(global_device_id_map),
nccl_clique_id_callback(nccl_clique_id_callback),
collective_max_nchannels(collective_max_nchannels),
p2p_max_nchannels(p2p_max_nchannels) {}
Thunk::ExecuteParams Thunk::ExecuteParams::Create(
const ServiceExecutableRunOptions& run_options,
const BufferAllocations& buffer_allocations, se::Stream* stream,
se::Stream* command_buffer_trace_stream,
CollectiveExecuteParams* collective_params,
CollectiveCliques* collective_cliques,
ExecutionStreamIdMap additional_compute_streams) {
return ExecuteParams(&buffer_allocations, stream, command_buffer_trace_stream,
collective_params, collective_cliques,
run_options.run_options().device_to_host_stream(),
run_options.run_options().host_to_device_stream(),
run_options.run_options().send_device_memory_function(),
run_options.run_options().recv_device_memory_function(),
run_options.run_options().ffi_execution_context(),
additional_compute_streams,
run_options.run_options().gpu_executable_run_options()
? run_options.run_options()
.gpu_executable_run_options()
->enable_mock_nccl_collectives()
: false);
}
Thunk::ExecuteParams Thunk::ExecuteParams::CloneWithNewAllocations(
const Thunk::ExecuteParams& params,
const BufferAllocations& buffer_allocations) {
return ExecuteParams(
&buffer_allocations, params.stream, params.command_buffer_trace_stream,
params.collective_params, params.collective_cliques,
params.device_to_host_stream, params.host_to_device_stream,
params.send_device_memory_function, params.recv_device_memory_function,
params.ffi_execution_context, params.additional_compute_streams);
}
Thunk::ExecuteParams::ExecuteParams(
const BufferAllocations* buffer_allocations, se::Stream* stream,
se::Stream* command_buffer_trace_stream,
CollectiveExecuteParams* collective_params,
CollectiveCliques* collective_cliques, se::Stream* device_to_host_stream,
se::Stream* host_to_device_stream,
SendDeviceMemoryFunction* send_device_memory_function,
RecvDeviceMemoryFunction* recv_device_memory_function,
const ffi::ExecutionContext* ffi_execution_context,
ExecutionStreamIdMap additional_compute_streams, bool mock_collectives)
: buffer_allocations(buffer_allocations),
stream(stream),
command_buffer_trace_stream(command_buffer_trace_stream),
collective_params(collective_params),
collective_cliques(collective_cliques),
device_to_host_stream(device_to_host_stream),
host_to_device_stream(host_to_device_stream),
send_device_memory_function(send_device_memory_function),
recv_device_memory_function(recv_device_memory_function),
ffi_execution_context(ffi_execution_context),
additional_compute_streams(additional_compute_streams),
mock_collectives(mock_collectives) {}
absl::string_view Thunk::KindToString(Thunk::Kind kind) {
#define CASE(x) \
case Thunk::x: \
return #x
switch (kind) {
CASE(kDynamicSlice);
CASE(kCholesky);
CASE(kCommandBuffer);
CASE(kConditional);
CASE(kConvolution);
CASE(kConvolutionReorder);
CASE(kCopy);
CASE(kCopyDone);
CASE(kCubSort);
CASE(kCublasLtMatmul);
CASE(kCustomCall);
CASE(kCustomKernel);
CASE(kNcclAllGather);
CASE(kNcclAllGatherStart);
CASE(kNcclAllGatherDone);
CASE(kNcclAllReduce);
CASE(kNcclAllReduceStart);
CASE(kNcclAllReduceDone);
CASE(kNcclCollectiveBroadcast);
CASE(kNcclCollectiveBroadcastStart);
CASE(kNcclCollectiveBroadcastDone);
CASE(kNcclCollectivePermute);
CASE(kNcclCollectivePermuteStart);
CASE(kNcclCollectivePermuteDone);
CASE(kNcclReduceScatter);
CASE(kNcclReduceScatterStart);
CASE(kNcclReduceScatterDone);
CASE(kNcclAllToAll);
CASE(kNcclAllToAllStart);
CASE(kNcclAllToAllDone);
CASE(kNcclSend);
CASE(kNcclSendDone);
CASE(kNcclRecv);
CASE(kNcclRecvDone);
CASE(kFft);
CASE(kGemm);
CASE(kInfeed);
CASE(kKernel);
CASE(kMemset32BitValue);
CASE(kMemzero);
CASE(kNorm);
CASE(kOutfeed);
CASE(kSend);
CASE(kSendDone);
CASE(kPartitionId);
CASE(kReplicaId);
CASE(kRecv);
CASE(kRecvDone);
CASE(kSequential);
CASE(kTriangularSolve);
CASE(kWhile);
CASE(kWaitForStreams);
CASE(kCuDnn);
}
}
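// Picks the stream a thunk executes on: the default execution stream id maps
// to the main compute stream, while any other id must name one of the
// additional compute streams.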
absl::StatusOr<se::Stream*> Thunk::GetStreamForExecution(
ExecutionStreamId stream_id, const ExecuteParams& params) {
if (stream_id == kDefaultExecutionStreamId) {
return params.stream;
}
auto iter = params.additional_compute_streams.find(stream_id);
if (iter == params.additional_compute_streams.end()) {
return absl::InvalidArgumentError("Invalid execution stream id.");
}
return iter->second;
}
std::ostream& operator<<(std::ostream& os, Thunk::Kind kind) {
return os << Thunk::KindToString(kind);
}
bool IsReductionCollective(Thunk::Kind kind) {
return kind == Thunk::kNcclAllReduce || kind == Thunk::kNcclAllReduceStart ||
kind == Thunk::kNcclReduceScatter ||
kind == Thunk::kNcclReduceScatterStart;
}
Thunk::ThunkInfo Thunk::ThunkInfo::WithProfileAnnotation(
const HloInstruction* instr) {
ThunkInfo thunk_info;
thunk_info.profile_annotation = instr->name();
auto gpu_backend_config = instr->backend_config<GpuBackendConfig>();
if (gpu_backend_config.ok()) {
thunk_info.execution_stream_id =
std::max<uint64_t>(kDefaultExecutionStreamId.value(),
gpu_backend_config->operation_queue_id());
}
return thunk_info;
}
bool Thunk::IsCollective() const {
switch (kind()) {
case kNcclAllGather:
case kNcclAllGatherStart:
case kNcclAllGatherDone:
case kNcclAllReduce:
case kNcclAllReduceStart:
case kNcclAllReduceDone:
case kNcclCollectiveBroadcast:
case kNcclCollectiveBroadcastStart:
case kNcclCollectiveBroadcastDone:
case kNcclCollectivePermute:
case kNcclCollectivePermuteStart:
case kNcclCollectivePermuteDone:
case kNcclReduceScatter:
case kNcclReduceScatterStart:
case kNcclReduceScatterDone:
case kNcclAllToAll:
case kNcclAllToAllStart:
case kNcclAllToAllDone:
case kNcclSend:
case kNcclSendDone:
case kNcclRecv:
case kNcclRecvDone:
return true;
default:
return false;
}
}
}
} | #include "xla/backends/cpu/runtime/thunk.h"
#include <cstdint>
#include <utility>
#include "xla/executable_run_options.h"
#include "xla/service/cpu/collectives_interface.h"
#include "xla/service/cpu/cpu_executable_run_options.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
class ThunkExecuteStateTestHelper : public Thunk {
public:
static ExecuteState CreateExecuteState(int64_t parallel_tasks) {
return ExecuteState(parallel_tasks);
}
};
TEST(ThunkTest, OkExecuteEventSingleton) {
auto event = Thunk::OkExecuteEventSingleton();
ASSERT_TRUE(event.IsConcrete());
}
TEST(ThunkExecuteStateTest, OneTask) {
auto execute_state =
ThunkExecuteStateTestHelper::CreateExecuteState(1);
EXPECT_FALSE(execute_state.event.IsAvailable());
execute_state.Notify();
EXPECT_TRUE(execute_state.event.IsAvailable());
}
TEST(ThunkExecuteStateTest, MultipleTasks) {
int parallel_tasks = 10;
auto execute_state =
ThunkExecuteStateTestHelper::CreateExecuteState(parallel_tasks);
for (int i = 0; i < parallel_tasks; ++i) {
EXPECT_FALSE(execute_state.event.IsAvailable());
execute_state.Notify();
}
EXPECT_TRUE(execute_state.event.IsAvailable());
}
TEST(ThunkTest, ExecuteSession) {
Thunk::ExecuteSession session(2, 2);
EXPECT_EQ(session.num_workers(), 0);
{
Thunk::ExecuteSession::Lock lock = session.Join();
EXPECT_TRUE(lock);
EXPECT_EQ(session.num_workers(), 1);
}
EXPECT_EQ(session.num_workers(), 0);
Thunk::ExecuteSession::Lock lock0 = session.TryJoin();
Thunk::ExecuteSession::Lock lock1 = session.TryJoin();
EXPECT_TRUE(lock0);
EXPECT_TRUE(lock1);
EXPECT_EQ(session.num_workers(), 2);
Thunk::ExecuteSession::Lock lock2 = session.TryJoin();
EXPECT_FALSE(lock2);
EXPECT_EQ(session.num_workers(), 2);
Thunk::ExecuteSession::Lock lock3 = session.Join();
EXPECT_TRUE(lock3);
EXPECT_EQ(session.num_workers(), 3);
auto sink = [](Thunk::ExecuteSession::Lock lock) {};
sink(std::move(lock0));
sink(std::move(lock1));
sink(std::move(lock3));
EXPECT_EQ(session.num_workers(), 0);
Thunk::ExecuteSession::Lock lock4 = session.Join();
Thunk::ExecuteSession::Lock lock5 = lock4;
EXPECT_TRUE(lock4);
EXPECT_TRUE(lock5);
EXPECT_EQ(session.num_workers(), 2);
}
TEST(ThunkTest, CollectiveExecuteParams) {
ExecutableRunOptions run_options;
run_options.set_device_ordinal(0);
TF_ASSERT_OK_AND_ASSIGN(auto params,
Thunk::CollectiveExecuteParams::Create(&run_options));
EXPECT_NE(params.collectives, nullptr);
CpuExecutableRunOptions cpu_run_options;
cpu_run_options.set_collectives(
reinterpret_cast<CollectivesInterface*>(0x12345678));
run_options.set_cpu_executable_run_options(&cpu_run_options);
TF_ASSERT_OK_AND_ASSIGN(params,
Thunk::CollectiveExecuteParams::Create(&run_options));
EXPECT_EQ(params.collectives,
reinterpret_cast<CollectivesInterface*>(0x12345678));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/thunk.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/thunk_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e27303bd-5458-4010-86a5-e3c07c076a7b | cpp | google/tensorstore | dimension_identifier | tensorstore/index_space/dimension_identifier.cc | tensorstore/index_space/dimension_identifier_test.cc | #include "tensorstore/index_space/dimension_identifier.h"
#include <algorithm>
#include <cassert>
#include <ostream>
#include <string>
#include <system_error>
#include <variant>
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
std::ostream& operator<<(std::ostream& os, const DimensionIdentifier& x) {
if (x.label().data()) {
return os << QuoteString(x.label());
}
return os << x.index();
}
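// Normalizes a possibly negative dimension index into [0, rank); for example,
// index -1 refers to the last dimension.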
Result<DimensionIndex> NormalizeDimensionIndex(DimensionIndex index,
DimensionIndex rank) {
assert(rank >= 0);
if (index < -rank || index >= rank) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Dimension index ", index, " is outside valid range [-", rank, ", ",
rank, ")"));
}
return index >= 0 ? index : index + rank;
}
Result<DimensionIndex> NormalizeDimensionExclusiveStopIndex(
DimensionIndex index, DimensionIndex rank) {
assert(rank >= 0);
if (index < -rank - 1 || index > rank) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Dimension exclusive stop index ", index, " is outside valid range [-",
rank + 1, ", ", rank, "]"));
}
return index >= 0 ? index : index + rank;
}
namespace {
template <typename Label>
Result<DimensionIndex> NormalizeDimensionLabelImpl(std::string_view label,
span<const Label> labels) {
if (label.empty()) {
return absl::InvalidArgumentError(
"Dimension cannot be specified by empty label");
}
const DimensionIndex dim =
std::find(labels.begin(), labels.end(), label) - labels.begin();
if (dim == labels.size()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Label ", QuoteString(label), " does not match one of {",
absl::StrJoin(labels, ", ",
[](std::string* out, std::string_view x) {
*out += QuoteString(x);
}),
"}"));
}
return dim;
}
}
Result<DimensionIndex> NormalizeDimensionLabel(std::string_view label,
span<const std::string> labels) {
return NormalizeDimensionLabelImpl(label, labels);
}
Result<DimensionIndex> NormalizeDimensionLabel(
std::string_view label, span<const std::string_view> labels) {
return NormalizeDimensionLabelImpl(label, labels);
}
Result<DimensionIndex> NormalizeDimensionIdentifier(
DimensionIdentifier identifier, span<const std::string> labels) {
if (identifier.label().data()) {
return NormalizeDimensionLabel(identifier.label(), labels);
} else {
return NormalizeDimensionIndex(identifier.index(), labels.size());
}
}
std::ostream& operator<<(std::ostream& os, const DimRangeSpec& spec) {
if (spec.inclusive_start) os << *spec.inclusive_start;
os << ':';
if (spec.exclusive_stop) os << *spec.exclusive_stop;
if (spec.step != 1) os << ':' << spec.step;
return os;
}
bool operator==(const DimRangeSpec& a, const DimRangeSpec& b) {
return a.inclusive_start == b.inclusive_start &&
a.exclusive_stop == b.exclusive_stop && a.step == b.step;
}
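// Expands a `start:stop:step` range (Python slice semantics) over dimensions
// of the given rank, appending each selected dimension index to `result`.
// Omitted bounds default to the full extent in the direction of `step`.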
absl::Status NormalizeDimRangeSpec(const DimRangeSpec& spec,
DimensionIndex rank,
DimensionIndexBuffer* result) {
const DimensionIndex step = spec.step;
if (step == 0) {
return absl::InvalidArgumentError("step must not be 0");
}
DimensionIndex inclusive_start;
if (spec.inclusive_start) {
TENSORSTORE_ASSIGN_OR_RETURN(
inclusive_start, NormalizeDimensionIndex(*spec.inclusive_start, rank));
} else if (step > 0) {
inclusive_start = 0;
} else {
inclusive_start = rank - 1;
}
DimensionIndex exclusive_stop;
if (spec.exclusive_stop) {
TENSORSTORE_ASSIGN_OR_RETURN(
exclusive_stop,
NormalizeDimensionExclusiveStopIndex(*spec.exclusive_stop, rank));
if ((step > 0 && exclusive_stop < inclusive_start) ||
(step < 0 && exclusive_stop > inclusive_start)) {
return absl::InvalidArgumentError(
tensorstore::StrCat(spec, " is not a valid range"));
}
} else if (step > 0) {
exclusive_stop = rank;
} else {
exclusive_stop = -1;
}
const DimensionIndex size =
CeilOfRatio(exclusive_stop - inclusive_start, step);
result->reserve(result->size() + size);
for (DimensionIndex i = 0; i < size; ++i) {
result->push_back(inclusive_start + step * i);
}
return absl::OkStatus();
}
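// Resolves a single dynamic spec (index, label, or range) against `labels`
// and appends the matching dimension indices to `result`.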
absl::Status NormalizeDynamicDimSpec(const DynamicDimSpec& spec,
span<const std::string> labels,
DimensionIndexBuffer* result) {
struct Visitor {
span<const std::string> labels;
DimensionIndexBuffer* result;
absl::Status operator()(DimensionIndex i) const {
TENSORSTORE_ASSIGN_OR_RETURN(DimensionIndex index,
NormalizeDimensionIndex(i, labels.size()));
result->push_back(index);
return absl::OkStatus();
}
absl::Status operator()(const std::string& label) const {
TENSORSTORE_ASSIGN_OR_RETURN(DimensionIndex index,
NormalizeDimensionLabel(label, labels));
result->push_back(index);
return absl::OkStatus();
}
absl::Status operator()(const DimRangeSpec& s) const {
return NormalizeDimRangeSpec(s, labels.size(), result);
}
};
return std::visit(Visitor{labels, result}, spec);
}
absl::Status NormalizeDynamicDimSpecs(span<const DynamicDimSpec> specs,
span<const std::string> labels,
DimensionIndexBuffer* result) {
for (const auto& spec : specs) {
TENSORSTORE_RETURN_IF_ERROR(NormalizeDynamicDimSpec(spec, labels, result));
}
return absl::OkStatus();
}
} | #include "tensorstore/index_space/dimension_identifier.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::DimensionIdentifier;
using ::tensorstore::DimensionIndexBuffer;
using ::tensorstore::DimRangeSpec;
using ::tensorstore::DynamicDimSpec;
using ::tensorstore::Index;
using ::tensorstore::MatchesStatus;
using ::tensorstore::NormalizeDimensionIdentifier;
using ::tensorstore::NormalizeDimensionIndex;
using ::tensorstore::span;
using ::tensorstore::StrCat;
TEST(DimensionIdentifierTest, ConstructDefault) {
DimensionIdentifier d;
EXPECT_EQ(std::numeric_limits<Index>::max(), d.index());
EXPECT_EQ(nullptr, d.label().data());
}
TEST(DimensionIdentifierTest, ConstructDimensionIndex) {
DimensionIdentifier d(5);
EXPECT_EQ(5, d.index());
EXPECT_EQ(nullptr, d.label().data());
}
TEST(DimensionIdentifierTest, ConstructStringView) {
DimensionIdentifier d(std::string_view("hello"));
EXPECT_EQ(std::numeric_limits<Index>::max(), d.index());
EXPECT_EQ("hello", d.label());
}
TEST(DimensionIdentifierTest, ConstructCString) {
DimensionIdentifier d("hello");
EXPECT_EQ(std::numeric_limits<Index>::max(), d.index());
EXPECT_EQ("hello", d.label());
}
TEST(DimensionIdentifierTest, ConstructStdString) {
std::string s = "hello";
DimensionIdentifier d(s);
EXPECT_EQ(std::numeric_limits<Index>::max(), d.index());
EXPECT_EQ("hello", d.label());
}
TEST(DimensionIdentifierTest, Compare) {
EXPECT_EQ(DimensionIdentifier(3), DimensionIdentifier(3));
EXPECT_EQ(DimensionIdentifier("a"), DimensionIdentifier("a"));
EXPECT_NE(DimensionIdentifier("a"), DimensionIdentifier(2));
EXPECT_NE(DimensionIdentifier("a"), DimensionIdentifier("b"));
EXPECT_NE(DimensionIdentifier(2), DimensionIdentifier(3));
}
TEST(DimensionIdentifierTest, PrintToOstream) {
EXPECT_EQ("3", StrCat(DimensionIdentifier(3)));
EXPECT_EQ("\"a\"", StrCat(DimensionIdentifier("a")));
}
TEST(NormalizeDimensionIndexTest, ValidNonNegative) {
EXPECT_EQ(0, NormalizeDimensionIndex(0, 5));
EXPECT_EQ(3, NormalizeDimensionIndex(3, 5));
EXPECT_EQ(4, NormalizeDimensionIndex(4, 5));
}
TEST(NormalizeDimensionIndexTest, ValidNegative) {
EXPECT_EQ(0, NormalizeDimensionIndex(-5, 5));
EXPECT_EQ(2, NormalizeDimensionIndex(-3, 5));
EXPECT_EQ(4, NormalizeDimensionIndex(-1, 5));
}
TEST(NormalizeDimensionIndexTest, InvalidNegative) {
EXPECT_THAT(NormalizeDimensionIndex(-6, 5),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(NormalizeDimensionIndex(-7, 5),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(NormalizeDimensionIndexTest, InvalidNonNegative) {
EXPECT_THAT(NormalizeDimensionIndex(5, 5),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(NormalizeDimensionIndex(6, 5),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(NormalizeDimensionLabelTest, ValidLabel) {
EXPECT_EQ(2, NormalizeDimensionLabel(
"x", span<const std::string>({"a", "b", "x", "y"})));
}
TEST(NormalizeDimensionLabelTest, MissingLabel) {
EXPECT_THAT(NormalizeDimensionLabel(
"w", span<const std::string>({"a", "b", "x", "y"})),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(NormalizeDimensionLabelTest, EmptyLabel) {
EXPECT_THAT(NormalizeDimensionLabel(
"", span<const std::string>({"a", "b", "x", "y"})),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(NormalizeDimensionIdentifierTest, ValidLabel) {
EXPECT_EQ(2, NormalizeDimensionIdentifier(
"x", span<const std::string>({"a", "b", "x", "y"})));
}
TEST(NormalizeDimensionIdentifierTest, ValidPositiveIndex) {
EXPECT_EQ(2, NormalizeDimensionIdentifier(
2, span<const std::string>({"a", "b", "x", "y"})));
EXPECT_EQ(0, NormalizeDimensionIdentifier(
0, span<const std::string>({"a", "b", "x", "y"})));
EXPECT_EQ(3, NormalizeDimensionIdentifier(
3, span<const std::string>({"a", "b", "x", "y"})));
}
TEST(NormalizeDimensionIdentifierTest, ValidNegativeIndex) {
EXPECT_EQ(2, NormalizeDimensionIdentifier(
-2, span<const std::string>({"a", "b", "x", "y"})));
EXPECT_EQ(3, NormalizeDimensionIdentifier(
-1, span<const std::string>({"a", "b", "x", "y"})));
EXPECT_EQ(0, NormalizeDimensionIdentifier(
-4, span<const std::string>({"a", "b", "x", "y"})));
}
TEST(NormalizeDimensionIdentifierTest, InvalidIndex) {
EXPECT_THAT(NormalizeDimensionIdentifier(
4, span<const std::string>({"a", "b", "x", "y"})),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(NormalizeDimensionIdentifier(
-5, span<const std::string>({"a", "b", "x", "y"})),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(DimRangeSpecTest, Comparison) {
DimRangeSpec a{1, 5, 1};
DimRangeSpec b{0, 5, 1};
DimRangeSpec c{1, 6, 1};
DimRangeSpec d{1, 6, 2};
EXPECT_EQ(a, a);
EXPECT_NE(a, b);
EXPECT_NE(a, c);
EXPECT_NE(a, d);
}
TEST(DimRangeSpecTest, PrintToOstream) {
EXPECT_EQ("1:5", StrCat(DimRangeSpec{1, 5, 1}));
EXPECT_EQ("1:5:2", StrCat(DimRangeSpec{1, 5, 2}));
EXPECT_EQ(":5", StrCat(DimRangeSpec{std::nullopt, 5, 1}));
EXPECT_EQ("1:", StrCat(DimRangeSpec{1, std::nullopt, 1}));
EXPECT_EQ(":", StrCat(DimRangeSpec{std::nullopt, std::nullopt, 1}));
EXPECT_EQ("::-1", StrCat(DimRangeSpec{std::nullopt, std::nullopt, -1}));
}
TEST(NormalizeDimRangeSpecTest, ValidFullySpecifiedStep1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{2, 10, 1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(2, 3, 4, 5, 6, 7, 8, 9));
}
TEST(NormalizeDimRangeSpecTest, ValidFullySpecifiedStep2) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{2, 10, 2}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(2, 4, 6, 8));
}
TEST(NormalizeDimRangeSpecTest, ValidFullySpecifiedStep2Floor) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{2, 7, 3}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(2, 5));
}
TEST(NormalizeDimRangeSpecTest, ValidFullySpecifiedStepNeg1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{9, 1, -1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(9, 8, 7, 6, 5, 4, 3, 2));
}
TEST(NormalizeDimRangeSpecTest, ValidFullySpecifiedStepNeg2) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{9, 1, -2}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(9, 7, 5, 3));
}
TEST(NormalizeDimRangeSpecTest, ValidStartOnlyStep1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(
absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{15, std::nullopt, 1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(15, 16, 17, 18, 19));
}
TEST(NormalizeDimRangeSpecTest, ValidStartOnlyStepNegative1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(
absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{5, std::nullopt, -1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(5, 4, 3, 2, 1, 0));
}
TEST(NormalizeDimRangeSpecTest, ValidNegativeStartOnlyStep1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(
absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{-5, std::nullopt, 1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(15, 16, 17, 18, 19));
}
TEST(NormalizeDimRangeSpecTest, ValidStopOnlyStep1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(
absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{std::nullopt, 5, 1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(0, 1, 2, 3, 4));
}
TEST(NormalizeDimRangeSpecTest, ValidNegativeStopOnlyStep1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(
absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{std::nullopt, -15, 1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(0, 1, 2, 3, 4));
}
TEST(NormalizeDimRangeSpecTest, ValidStopOnlyStepNeg1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(
absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{std::nullopt, 15, -1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(19, 18, 17, 16));
}
TEST(NormalizeDimRangeSpecTest, ValidNoBoundsStep1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{std::nullopt, std::nullopt, 1},
5, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(0, 1, 2, 3, 4));
}
TEST(NormalizeDimRangeSpecTest, ValidNoBoundsStep2) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{std::nullopt, std::nullopt, 2},
5, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(0, 2, 4));
}
TEST(NormalizeDimRangeSpecTest, ValidMaxStop) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{1, 5, 1}, 5, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(1, 2, 3, 4));
}
TEST(NormalizeDimRangeSpecTest, InvalidStep0) {
DimensionIndexBuffer buffer;
EXPECT_THAT(
NormalizeDimRangeSpec(DimRangeSpec{std::nullopt, std::nullopt, 0}, 5,
&buffer),
MatchesStatus(absl::StatusCode::kInvalidArgument, "step must not be 0"));
}
TEST(NormalizeDimRangeSpecTest, InvalidIntervalStep1) {
DimensionIndexBuffer buffer;
EXPECT_THAT(NormalizeDimRangeSpec(DimRangeSpec{3, 1, 1}, 5, &buffer),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"3:1 is not a valid range"));
}
TEST(NormalizeDimRangeSpecTest, InvalidIntervalStepNeg1) {
DimensionIndexBuffer buffer;
EXPECT_THAT(NormalizeDimRangeSpec(DimRangeSpec{1, 3, -1}, 5, &buffer),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"1:3:-1 is not a valid range"));
}
TEST(NormalizeDimRangeSpecTest, InvalidIndex) {
DimensionIndexBuffer buffer;
EXPECT_THAT(NormalizeDimRangeSpec(DimRangeSpec{1, 8, 1}, 5, &buffer),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Dimension exclusive stop index 8 is outside valid "
"range \\[-6, 5\\]"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/dimension_identifier.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/dimension_identifier_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
2172c17c-1538-4397-a83d-016d03c34528 | cpp | google/tensorstore | sender | tensorstore/util/execution/sender.h | tensorstore/util/execution/sender_test.cc | #ifndef TENSORSTORE_UTIL_EXECUTION_SENDER_H_
#define TENSORSTORE_UTIL_EXECUTION_SENDER_H_
#include <cstddef>
#include <tuple>
#include <utility>
#include "tensorstore/util/execution/execution.h"
namespace tensorstore {
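// Receiver that ignores every signal it is sent; useful when the result of
// an operation is not needed.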
class NullReceiver {
public:
template <typename CancelReceiver>
friend void set_starting(NullReceiver&, CancelReceiver) {}
template <typename... V>
friend void set_value(NullReceiver&, V...) {}
friend void set_done(NullReceiver&) {}
template <typename E>
friend void set_error(NullReceiver&, E e) {}
friend void set_cancel(NullReceiver&) {}
friend void set_stopping(NullReceiver&) {}
};
class NullSender {
template <typename R>
friend void submit(NullSender&, R&&) {}
};
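// The senders below deliver their signal synchronously, from within
// `submit`, to whichever receiver they are given.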
struct CancelSender {
template <typename Receiver>
friend void submit(CancelSender, Receiver&& receiver) {
execution::set_cancel(receiver);
}
};
template <typename E>
struct ErrorSender {
E error;
template <typename Receiver>
friend void submit(ErrorSender& sender, Receiver&& receiver) {
execution::set_error(receiver, std::move(sender.error));
}
};
template <typename E>
ErrorSender(E error) -> ErrorSender<E>;
template <typename... V>
struct ValueSender {
ValueSender(V... v) : value(std::move(v)...) {}
std::tuple<V...> value;
template <typename Receiver>
friend void submit(ValueSender& sender, Receiver&& receiver) {
sender.SubmitHelper(std::forward<Receiver>(receiver),
std::make_index_sequence<sizeof...(V)>{});
}
private:
template <typename Receiver, size_t... Is>
void SubmitHelper(Receiver&& receiver, std::index_sequence<Is...>) {
execution::set_value(receiver, std::move(std::get<Is>(value))...);
}
};
template <typename... V>
ValueSender(V... v) -> ValueSender<V...>;
}
#endif | #include "tensorstore/util/execution/sender.h"
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/executor.h"
namespace {
template <typename T, typename... Arg>
using trait_has_submit =
decltype(std::declval<T&&>().submit(std::declval<Arg>()...));
template <typename... Arg>
using trait_has_adl_submit = decltype(submit(std::declval<Arg>()...));
static_assert(!tensorstore::internal::is_detected<
trait_has_submit, tensorstore::NullSender&, int>::value);
static_assert(tensorstore::internal::is_detected<
trait_has_adl_submit, tensorstore::NullSender&, int>::value);
TEST(NullReceiverTest, SetDone) {
tensorstore::NullReceiver receiver;
tensorstore::execution::set_done(receiver);
}
TEST(NullReceiverTest, SetValue) {
tensorstore::NullReceiver receiver;
tensorstore::execution::set_value(receiver, 3, 4);
}
TEST(NullReceiverTest, SetError) {
tensorstore::NullReceiver receiver;
tensorstore::execution::set_error(receiver, 10);
}
TEST(CancelSenderTest, Basic) {
std::vector<std::string> log;
tensorstore::execution::submit(tensorstore::CancelSender{},
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_cancel"));
}
TEST(ErrorSenderTest, Basic) {
std::vector<std::string> log;
tensorstore::execution::submit(tensorstore::ErrorSender<int>{3},
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_error: 3"));
}
TEST(ValueSenderTest, Basic) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::ValueSender<int, std::string>{3, "hello"},
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_value: 3, hello"));
}
template <typename Sender, typename Executor>
struct SenderWithExecutor {
Executor executor;
Sender sender;
template <typename Receiver>
void submit(Receiver receiver) {
struct Callback {
Sender sender;
Receiver receiver;
void operator()() {
tensorstore::execution::submit(sender, std::move(receiver));
}
};
executor(Callback{std::move(sender), std::move(receiver)});
}
};
struct QueueExecutor {
std::vector<tensorstore::ExecutorTask>* queue;
void operator()(tensorstore::ExecutorTask task) const {
queue->push_back(std::move(task));
}
};
TEST(SenderWithExecutorTest, SetValue) {
std::vector<tensorstore::ExecutorTask> queue;
std::vector<std::string> log;
QueueExecutor executor{&queue};
tensorstore::execution::submit(
SenderWithExecutor<tensorstore::ValueSender<int, std::string>,
tensorstore::Executor>{executor, {3, "hello"}},
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_EQ(1, queue.size());
std::move(queue[0])();
EXPECT_THAT(log, ::testing::ElementsAre("set_value: 3, hello"));
}
TEST(SenderWithExecutorTest, SetError) {
std::vector<tensorstore::ExecutorTask> queue;
std::vector<std::string> log;
QueueExecutor executor{&queue};
tensorstore::execution::submit(
SenderWithExecutor<tensorstore::ErrorSender<int>, tensorstore::Executor>{
executor, {3}},
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_EQ(1, queue.size());
std::move(queue[0])();
EXPECT_THAT(log, ::testing::ElementsAre("set_error: 3"));
}
TEST(SenderWithExecutorTest, SetCancel) {
std::vector<tensorstore::ExecutorTask> queue;
std::vector<std::string> log;
QueueExecutor executor{&queue};
tensorstore::execution::submit(
SenderWithExecutor<tensorstore::CancelSender, tensorstore::Executor>{
executor},
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_EQ(1, queue.size());
std::move(queue[0])();
EXPECT_THAT(log, ::testing::ElementsAre("set_cancel"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/sender.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/sender_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
816e5959-39dc-40a9-be92-53a02da28dc4 | cpp | tensorflow/tensorflow | sorting | third_party/xla/xla/hlo/builder/lib/sorting.cc | third_party/xla/xla/service/gpu/tests/sorting_test.cc | #include "xla/hlo/builder/lib/sorting.h"
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/builder/lib/comparators.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/loops.h"
#include "xla/hlo/builder/lib/slicing.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
XlaOp TopK(XlaOp input, int64_t k, PrimitiveType index_type) {
XlaBuilder* const builder = input.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape input_shape, builder->GetShape(input));
int last_dim = input_shape.dimensions_size() - 1;
int64_t last_dim_size = input_shape.dimensions(last_dim);
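    // Heuristic thresholds for switching to a partitioned top-k: for a large
    // last dimension and a k that is small relative to the partition size,
    // sorting fixed-size partitions and folding their top-k candidates
    // together is cheaper than one full sort of the whole dimension.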
const int64_t kPerPartitionSize = 8192;
const int64_t kLastDimSizeThreshold = 524288;
const int64_t kMinNumPartitions = 8;
const int64_t kMinimalK = 1000;
if ((k >= kMinimalK) && (k < kPerPartitionSize) &&
(kPerPartitionSize / k > 2) && last_dim_size >= kLastDimSizeThreshold) {
int64_t num_partitions =
CeilOfRatio(last_dim_size - k, kPerPartitionSize - k);
if (num_partitions >= kMinNumPartitions) {
return TopKWithPartitions(input, k, num_partitions, index_type);
}
}
Shape iota_shape =
ShapeUtil::MakeShape(index_type, input_shape.dimensions());
XlaOp iota = Iota(builder, iota_shape, last_dim);
for (int64_t i = 0; i < input_shape.rank(); ++i) {
if (input_shape.is_dynamic_dimension(i)) {
iota = SetDimensionSize(iota, GetDimensionSize(input, i), i);
}
}
auto input_dims = input_shape.dimensions();
constexpr int32_t kLow16BitsLimit = int32_t{1} << 16;
constexpr int32_t kLow16BitsMask = kLow16BitsLimit - 1;
constexpr int32_t kHigh16BitsMask = ~kLow16BitsMask;
constexpr int kMaxLastDimSizeForSmallBatches = 1500;
constexpr int kSmallBatchSizeThreshold = 8;
    // The packed sort requires every index to fit in 16 bits, and is only
    // applied to shapes where a single fused sort is expected to win.
    const bool use_packed_bf16_sort =
        (input_shape.element_type() == BF16 &&
         last_dim_size < kLow16BitsLimit &&
         (last_dim_size < kMaxLastDimSizeForSmallBatches ||
          (input_shape.rank() == 2 &&
           input_shape.dimensions(0) >= kSmallBatchSizeThreshold)));
std::vector<int64_t> start_indices(input_shape.dimensions_size(), 0);
std::vector<int64_t> limit_indices(input_dims.begin(), input_dims.end());
limit_indices[last_dim] = k;
std::vector<int64_t> strides(input_shape.dimensions_size(), 1);
XlaOp values;
XlaOp indices;
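    // Packed bf16 sort: each bf16 value is widened to f32 (its low 16
    // mantissa bits become zero), the bit pattern is remapped so that plain
    // integer comparison matches floating-point ordering, and a 16-bit iota
    // index is packed into the low bits, so a single s32 key sort yields
    // both values and indices. sign_magnitude_to_from_ones_complement maps
    // a sign-magnitude f32 bit pattern to a monotonically ordered integer
    // encoding; it is an involution, so the same helper decodes sorted keys.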
if (use_packed_bf16_sort) {
auto sign_magnitude_to_from_ones_complement = [builder](const XlaOp in) {
constexpr int32_t kAllNonSignBits = 0x7fffffff;
XlaOp in_s32 = BitcastConvertType(in, S32);
return Xor(
And(in_s32, ConstantR0<int32_t>(builder, kAllNonSignBits)),
ShiftRightArithmetic(in_s32, ConstantR0<int32_t>(builder, 31)));
};
XlaOp input_f32_trimmed =
Or(sign_magnitude_to_from_ones_complement(
BitcastConvertType(ConvertElementType(input, F32), S32)),
ConstantR0<int32_t>(builder, kLow16BitsMask));
      // The low 16 bits were set to all ones above, so this XOR stores the
      // bitwise-inverted iota index in the low bits of each key.
      XlaOp input_and_iota = Xor(input_f32_trimmed, iota);
XlaOp sort_result_raw =
Sort({input_and_iota},
CreateScalarGtComputation({index_type}, builder), last_dim,
false);
sort_result_raw =
Slice(sort_result_raw, start_indices, limit_indices, strides);
sort_result_raw = RemoveDynamicDimension(sort_result_raw, last_dim);
      // Decode the sorted keys back to bf16 values: undo the ordering
      // transform and zero the low 16 index bits before bitcasting to f32.
      values = ConvertElementType(
          BitcastConvertType(
              And(sign_magnitude_to_from_ones_complement(sort_result_raw),
                  ConstantR0<int32_t>(builder, kHigh16BitsMask)),
              F32),
          BF16);
      // Flip the low bits back to the original iota and mask off the value
      // bits to recover the indices.
      indices = And(
          Xor(sort_result_raw, ConstantR0<int32_t>(builder, kLow16BitsMask)),
          ConstantR0<int32_t>(builder, kLow16BitsMask));
} else {
XlaOp sort_result =
Sort({input, iota},
CreateScalarGtComputation(
{input_shape.element_type(), index_type}, iota.builder()),
last_dim, true);
values = Slice(GetTupleElement(sort_result, 0), start_indices,
limit_indices, strides);
values = RemoveDynamicDimension(values, last_dim);
indices = Slice(GetTupleElement(sort_result, 1), start_indices,
limit_indices, strides);
indices = RemoveDynamicDimension(indices, last_dim);
}
return Tuple(builder, {values, indices});
});
}
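// Computes top-k by splitting the last dimension into `num_partitions`
// slices: the first slice is sorted and truncated to k, then each remaining
// slice is concatenated with the running candidates, re-sorted, and
// truncated to k again.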
XlaOp TopKWithPartitions(XlaOp input, int64_t k, int64_t num_partitions,
PrimitiveType index_type) {
XlaBuilder* const builder = input.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape input_shape, builder->GetShape(input));
int last_dim = input_shape.dimensions_size() - 1;
auto input_dims = input_shape.dimensions();
int64_t last_dim_size = input_shape.dimensions(last_dim);
const int64_t per_partition_size =
CeilOfRatio(last_dim_size, num_partitions);
if (k >= per_partition_size) {
return TopK(input, k, index_type);
}
Shape iota_shape =
ShapeUtil::MakeShape(index_type, input_shape.dimensions());
XlaOp iota = Iota(builder, iota_shape, last_dim);
for (int64_t i = 0; i < input_shape.rank(); ++i) {
if (input_shape.is_dynamic_dimension(i)) {
iota = SetDimensionSize(iota, GetDimensionSize(input, i), i);
}
}
auto topk_body_fn =
[&](XlaOp partition, absl::Span<const XlaOp> values_and_indices,
XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> {
auto values = values_and_indices[0];
auto indices = values_and_indices[1];
auto input = values_and_indices[2];
auto iota = values_and_indices[3];
XlaOp start =
Mul(Add(partition, One(builder, index_type)),
ConstantR0WithType(builder, index_type, per_partition_size));
XlaOp sliced_input =
DynamicSliceInMinorDims(input, {start}, {per_partition_size});
XlaOp sliced_indices =
DynamicSliceInMinorDims(iota, {start}, {per_partition_size});
sliced_input = ConcatInDim(builder, {values, sliced_input}, last_dim);
sliced_indices =
ConcatInDim(builder, {indices, sliced_indices}, last_dim);
XlaOp sort_result = Sort(
{sliced_input, sliced_indices},
CreateScalarGtComputation({input_shape.element_type(), index_type},
sliced_indices.builder()),
last_dim, true);
std::vector<int64_t> start_indices(input_shape.dimensions_size(), 0);
std::vector<int64_t> limit_indices(input_dims.begin(), input_dims.end());
std::vector<int64_t> strides(input_shape.dimensions_size(), 1);
start_indices[last_dim] = 0;
limit_indices[last_dim] = k;
values = Slice(GetTupleElement(sort_result, 0), start_indices,
limit_indices, strides);
indices = Slice(GetTupleElement(sort_result, 1), start_indices,
limit_indices, strides);
return std::vector<XlaOp>{values, indices, input, iota};
};
std::vector<int64_t> start_indices(input_shape.dimensions_size(), 0);
std::vector<int64_t> limit_indices(input_dims.begin(), input_dims.end());
std::vector<int64_t> strides(input_shape.dimensions_size(), 1);
start_indices[last_dim] = 0;
limit_indices[last_dim] = per_partition_size;
XlaOp sliced_input = Slice(input, start_indices, limit_indices, strides);
XlaOp sliced_indices = Slice(iota, start_indices, limit_indices, strides);
XlaOp sort_result =
Sort({sliced_input, sliced_indices},
CreateScalarGtComputation({input_shape.element_type(), index_type},
sliced_indices.builder()),
last_dim, true);
start_indices[last_dim] = 0;
limit_indices[last_dim] = k;
XlaOp values = Slice(GetTupleElement(sort_result, 0), start_indices,
limit_indices, strides);
XlaOp indices = Slice(GetTupleElement(sort_result, 1), start_indices,
limit_indices, strides);
TF_ASSIGN_OR_RETURN(
auto values_and_indices,
ForEachIndex(num_partitions - 1, index_type, topk_body_fn,
{values, indices, input, iota}, "topk_with_partition",
builder));
return Tuple(builder, {values_and_indices[0], values_and_indices[1]});
});
}
} | #include <cstdint>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "Eigen/Core"
#include "xla/error_spec.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/tests/gpu_codegen_test.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
class SortingTest : public GpuCodegenTest {
protected:
SortingTest() {}
};
TEST_F(SortingTest, Regression1) {
const char* hlo_text = R"(
HloModule TestModule
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY TestComputation {
x = f32[3, 2]{1, 0} parameter(0)
x.copy = f32[3, 2]{0, 1} copy(x)
ROOT sort = f32[3, 2]{0, 1} sort(x.copy), dimensions={1}, to_apply=compare
}
)";
EXPECT_TRUE(RunAndCompareNoHloPasses(hlo_text, ErrorSpec{1e-5, 1e-5}));
}
static constexpr int kRadixSortTestSize = 100000;
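// Checks that `lhs` and `rhs` are correctly ordered and returns true iff
// they differ; callers accumulate the result so an all-equal (likely
// uninitialized) output is rejected.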
template <typename T>
bool CheckOrder(T lhs, T rhs, bool asc, int pos) {
if (asc) {
EXPECT_TRUE(lhs <= rhs) << lhs << " > " << rhs << " @" << pos;
} else {
EXPECT_TRUE(lhs >= rhs) << lhs << " < " << rhs << " @" << pos;
}
return lhs != rhs;
}
bool CompareAdjacentValues(const Literal& literal, int index, bool ascending) {
if (primitive_util::IsFloatingPointType(literal.shape().element_type())) {
return CheckOrder(*literal.GetAsDouble({index - 1}),
*literal.GetAsDouble({index}), ascending, index);
} else {
return CheckOrder(*literal.GetIntegralAsS64({index - 1}),
*literal.GetIntegralAsS64({index}), ascending, index);
}
}
std::string GetTypeName(PrimitiveType type) {
return absl::AsciiStrToLower(PrimitiveType_Name(type));
}
class CubSortKeysTest : public GpuCodegenTest,
public ::testing::WithParamInterface<
std::tuple<std::shared_ptr<Literal>, bool>> {};
TEST_P(CubSortKeysTest, SortKeys) {
constexpr char kHloTemplate[] = R"(
HloModule TestModule
ENTRY %main {
%input = $0[$1] parameter(0)
%sort = ($0[$1], u8[$2]) custom-call(%input),
custom_call_target="__cub$$DeviceRadixSort",
backend_config="{\"descending\": $3}"
ROOT %gte = get-tuple-element(%sort), index=0
}
)";
bool ascending = std::get<1>(GetParam());
std::string hlo = absl::Substitute(
kHloTemplate,
GetTypeName(std::get<0>(GetParam())->shape().element_type()),
kRadixSortTestSize,
kRadixSortTestSize * 10,
ascending ? "false" : "true");
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
std::vector<Literal*> literals = {std::get<0>(GetParam()).get()};
auto result = ExecuteAndTransfer(std::move(module), literals);
bool has_diff = false;
for (int i = 1; i < kRadixSortTestSize; ++i) {
has_diff |= CompareAdjacentValues(result, i, ascending);
}
EXPECT_TRUE(has_diff) << "uninitialized output";
}
class CubSortPairsTest
: public GpuCodegenTest,
public ::testing::WithParamInterface<
std::tuple<std::shared_ptr<Literal>, PrimitiveType, bool>> {};
TEST_P(CubSortPairsTest, SortPairs) {
constexpr char kHloTemplate[] = R"(
HloModule TestModule
ENTRY %main {
%keys = $0[$2] parameter(0)
%values = $1[$2] convert(%keys)
ROOT %sort = ($0[$2], $1[$2], u8[$3]) custom-call(%keys, %values),
custom_call_target="__cub$$DeviceRadixSort",
backend_config="{\"descending\": $4}"
}
)";
bool ascending = std::get<2>(GetParam());
std::string hlo = absl::Substitute(
kHloTemplate,
GetTypeName(std::get<0>(GetParam())->shape().element_type()),
GetTypeName(std::get<1>(GetParam())), kRadixSortTestSize,
kRadixSortTestSize * 20,
ascending ? "false" : "true");
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
std::vector<Literal*> literals = {std::get<0>(GetParam()).get()};
auto result_tuple = ExecuteAndTransfer(std::move(module), literals);
std::vector<Literal> result = result_tuple.DecomposeTuple();
bool has_diff = false;
for (int i = 1; i < kRadixSortTestSize; ++i) {
has_diff |= CompareAdjacentValues(result[0], i, ascending);
has_diff |= CompareAdjacentValues(result[1], i, ascending);
}
EXPECT_TRUE(has_diff) << "uninitialized output";
}
template <PrimitiveType P, typename T>
std::shared_ptr<Literal> CreateRandomLiteral(T mean, T stddev) {
Shape shape = ShapeUtil::MakeShape(P, {kRadixSortTestSize});
auto maybe_literal =
LiteralUtil::CreateRandomLiteral<P, T>(shape, mean, stddev);
CHECK_OK(maybe_literal);
auto shared_literal = std::make_shared<Literal>(shape);
CHECK_OK(shared_literal->MoveFrom(std::move(*maybe_literal)));
return shared_literal;
}
INSTANTIATE_TEST_SUITE_P(
TestRadixSort, CubSortKeysTest,
::testing::Combine(
::testing::Values(
CreateRandomLiteral<F16, half>(
half(), Eigen::half_impl::float_to_half_rtne(1)),
CreateRandomLiteral<F32, float>(0, 1),
CreateRandomLiteral<F64, double>(0, 1),
CreateRandomLiteral<S8, int8_t>(0, 10),
CreateRandomLiteral<S16, int16_t>(0, 1000),
CreateRandomLiteral<S32, int32_t>(0, 1000000),
CreateRandomLiteral<U8, uint8_t>(128, 10),
CreateRandomLiteral<U16, uint16_t>(32768, 1000),
CreateRandomLiteral<U32, uint32_t>(1 << 30, 1000000)),
::testing::Bool()),
[](const ::testing::TestParamInfo<CubSortKeysTest::ParamType>& info) {
return absl::StrCat(
PrimitiveType_Name(std::get<0>(info.param)->shape().element_type()),
"_", std::get<1>(info.param) ? "asc" : "desc");
});
INSTANTIATE_TEST_SUITE_P(
TestRadixSort, CubSortPairsTest,
::testing::Combine(
::testing::Values(CreateRandomLiteral<U16, uint16_t>(32768, 1000),
CreateRandomLiteral<U32, uint32_t>(32768, 1000),
CreateRandomLiteral<U64, uint64_t>(32768, 1000)),
::testing::Values(F16, F32, F64), ::testing::Bool()),
[](const ::testing::TestParamInfo<CubSortPairsTest::ParamType>& info) {
return absl::StrCat(
PrimitiveType_Name(std::get<0>(info.param)->shape().element_type()),
"_", PrimitiveType_Name(std::get<1>(info.param)), "_",
std::get<2>(info.param) ? "asc" : "desc");
});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/sorting.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/tests/sorting_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9d01d5a6-3afb-4951-8c70-dc57dad14950 | cpp | tensorflow/tensorflow | nn_grad | tensorflow/c/experimental/gradients/nn_grad.cc | tensorflow/c/experimental/gradients/nn_grad_test.cc | #include "tensorflow/c/experimental/gradients/nn_grad.h"
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/immediate_execution_context.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/c/experimental/ops/array_ops.h"
#include "tensorflow/c/experimental/ops/math_ops.h"
#include "tensorflow/c/experimental/ops/nn_ops.h"
#include "tensorflow/core/lib/llvm_rtti/llvm_rtti.h"
#include "tensorflow/core/platform/errors.h"
using std::vector;
using tensorflow::ops::BiasAddGrad;
using tensorflow::ops::ReluGrad;
namespace tensorflow {
namespace gradients {
namespace {
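// Gradient for Relu: ReluGrad passes each upstream gradient element through
// where the corresponding forward activation is positive and zeroes it
// elsewhere; the forward outputs are ref-counted so they outlive the
// forward op.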
class ReluGradientFunction : public GradientFunction {
public:
explicit ReluGradientFunction(vector<AbstractTensorHandle*> f_outputs)
: forward_outputs_(f_outputs) {
for (auto output : forward_outputs_) {
if (output) {
output->Ref();
}
}
}
Status Compute(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> grad_outputs,
absl::Span<AbstractTensorHandle*> grad_inputs) override {
AbstractTensorHandle* upstream_grad = grad_outputs[0];
AbstractTensorHandle* activations = forward_outputs_[0];
std::string name = "relu_grad";
TF_RETURN_IF_ERROR(ReluGrad(ctx, upstream_grad, activations,
&grad_inputs[0], name.c_str()));
return absl::OkStatus();
}
~ReluGradientFunction() override {
for (auto output : forward_outputs_) {
if (output) {
output->Unref();
}
}
}
private:
vector<AbstractTensorHandle*> forward_outputs_;
};
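// Multiplies `vec` against `mat` by appending a unit dimension to `vec`
// (ExpandDims with axis -1) and relying on broadcasting in Mul; only
// supported under immediate execution.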
Status BroadcastMul(AbstractContext* ctx, AbstractTensorHandle* vec,
AbstractTensorHandle* mat,
absl::Span<AbstractTensorHandle*> outputs) {
if (!isa<ImmediateExecutionContext>(ctx)) {
return errors::Unimplemented(
"BroadcastMul is not supported in tracing mode yet.");
}
auto imm_ctx = dyn_cast<ImmediateExecutionContext>(ctx);
AbstractTensorPtr minus_1(imm_ctx->CreateInt32Scalar(-1));
ImmediateTensorHandlePtr dim(imm_ctx->CreateLocalHandle(minus_1.get()));
AbstractTensorHandle* expand_dims_outputs;
TF_RETURN_IF_ERROR(
ops::ExpandDims(ctx, vec, dim.get(), &expand_dims_outputs, "ExpandDims"));
TF_RETURN_IF_ERROR(
ops::Mul(ctx, expand_dims_outputs, mat, &outputs[0], "Mul"));
expand_dims_outputs->Unref();
return absl::OkStatus();
}
class SparseSoftmaxCrossEntropyWithLogitsGradientFunction
: public GradientFunction {
public:
explicit SparseSoftmaxCrossEntropyWithLogitsGradientFunction(
vector<AbstractTensorHandle*> f_outputs)
: forward_outputs_(f_outputs) {}
Status Compute(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> grad_outputs,
absl::Span<AbstractTensorHandle*> grad_inputs) override {
TF_RETURN_IF_ERROR(BroadcastMul(
ctx, grad_outputs[0], forward_outputs_[1],
grad_inputs.subspan(0, 1)));
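    // The second input (the integer-valued labels) has no gradient.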
grad_inputs[1] = nullptr;
return absl::OkStatus();
}
~SparseSoftmaxCrossEntropyWithLogitsGradientFunction() override {}
private:
vector<AbstractTensorHandle*> forward_outputs_;
};
class BiasAddGradientFunction : public GradientFunction {
public:
explicit BiasAddGradientFunction(AttrBuilder f_attrs)
: forward_attrs_(f_attrs) {}
Status Compute(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> grad_outputs,
absl::Span<AbstractTensorHandle*> grad_inputs) override {
AbstractTensorHandle* upstream_grad = grad_outputs[0];
DCHECK(upstream_grad);
std::string data_format;
TF_RETURN_IF_ERROR(forward_attrs_.Get("data_format", &data_format));
    // BiasAdd is the identity in its first input, so the upstream gradient
    // passes through unchanged (with an added reference).
    grad_inputs[0] = upstream_grad;
    grad_inputs[0]->Ref();
std::string name = "bias_add_grad";
TF_RETURN_IF_ERROR(BiasAddGrad(ctx, upstream_grad, &grad_inputs[1],
data_format.c_str(), name.c_str()));
return absl::OkStatus();
}
~BiasAddGradientFunction() override {}
private:
AttrBuilder forward_attrs_;
};
}
GradientFunction* ReluRegisterer(const ForwardOperation& op) {
return new ReluGradientFunction(op.outputs);
}
GradientFunction* SparseSoftmaxCrossEntropyWithLogitsRegisterer(
const ForwardOperation& op) {
return new SparseSoftmaxCrossEntropyWithLogitsGradientFunction(op.outputs);
}
GradientFunction* BiasAddRegisterer(const ForwardOperation& op) {
return new BiasAddGradientFunction(op.attrs);
}
}
} | #include "tensorflow/c/experimental/gradients/nn_grad.h"
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/c/eager/unified_api_testutil.h"
#include "tensorflow/c/experimental/gradients/grad_test_helper.h"
#include "tensorflow/c/experimental/gradients/tape/tape_context.h"
#include "tensorflow/c/experimental/ops/nn_ops.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/platform/tensor_float_32_utils.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace gradients {
namespace internal {
namespace {
using tensorflow::TF_StatusPtr;
Status ReluModel(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
return ops::Relu(ctx, inputs[0], &outputs[0], "Relu");
}
Status SparseSoftmaxCrossEntropyWithLogitsModel(
AbstractContext* ctx, absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
AbstractTensorHandle* loss;
AbstractTensorHandle* backprop;
TF_RETURN_IF_ERROR(ops::SparseSoftmaxCrossEntropyWithLogits(
ctx, inputs[0], inputs[1], &loss, &backprop,
"SparseSoftmaxCrossEntropyWithLogits"));
outputs[0] = loss;
backprop->Unref();
return absl::OkStatus();
}
Status BiasAddModel(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
return ops::BiasAdd(ctx, inputs[0], inputs[1], &outputs[0], "NHWC",
"BiasAdd");
}
class CppGradients
: public ::testing::TestWithParam<std::tuple<const char*, bool, bool>> {
protected:
void SetUp() override {
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
status_ = StatusFromTF_Status(status.get());
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
{
AbstractContext* ctx_raw = nullptr;
status_ =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
immediate_execution_ctx_.reset(ctx_raw);
}
enable_tensor_float_32_execution(false);
}
AbstractContextPtr immediate_execution_ctx_;
GradientRegistry registry_;
Status status_;
public:
bool UseMlir() const { return strcmp(std::get<0>(GetParam()), "mlir") == 0; }
bool UseFunction() const { return std::get<2>(GetParam()); }
};
TEST_P(CppGradients, TestReluGrad) {
status_ = registry_.Register("Relu", ReluRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
auto ReluGradModel = BuildGradModel(ReluModel, registry_);
float X_vals[] = {1.0f, 2.0f, 3.0f, -5.0f, -4.0f, -3.0f, 2.0f, 10.0f, -1.0f};
int64_t X_dims[] = {3, 3};
AbstractTensorHandlePtr X;
{
AbstractTensorHandle* X_raw;
status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
immediate_execution_ctx_.get(), X_vals, X_dims, 2, &X_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
X.reset(X_raw);
}
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
ReluModel, ReluGradModel, immediate_execution_ctx_.get(), {X.get()},
UseFunction()));
AbstractTensorHandlePtr Y;
{
AbstractTensorHandle* Y_raw;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 0.0f, &Y_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
Y.reset(Y_raw);
}
std::vector<AbstractTensorHandle*> outputs(1);
status_ = RunModel(ReluGradModel, immediate_execution_ctx_.get(), {Y.get()},
absl::MakeSpan(outputs), UseFunction());
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CheckTensorValue(outputs[0], {0.0f}, {},
0));
outputs[0]->Unref();
}
TEST_P(CppGradients, TestSparseSoftmaxCrossEntropyWithLogitsGrad) {
if (UseFunction()) {
GTEST_SKIP() << "Can't take gradient of "
"SparseSoftmaxCrossEntropyWithLogits in tracing mode.";
}
float X_vals[] = {1.0f, 2.0f, 3.0f, -5.0f, -4.0f, -3.0f, 2.0f, 0.0f, -1.0f};
int64_t X_dims[] = {3, 3};
AbstractTensorHandlePtr X;
{
AbstractTensorHandle* X_raw;
status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
immediate_execution_ctx_.get(), X_vals, X_dims, 2, &X_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
X.reset(X_raw);
}
int32_t Y_vals[] = {1, 0, 1};
int64_t Y_dims[] = {3};
AbstractTensorHandlePtr Y;
{
AbstractTensorHandle* Y_raw;
status_ = TestTensorHandleWithDims<int32_t, TF_INT32>(
immediate_execution_ctx_.get(), Y_vals, Y_dims, 1, &Y_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
Y.reset(Y_raw);
}
status_ = registry_.Register("SparseSoftmaxCrossEntropyWithLogits",
SparseSoftmaxCrossEntropyWithLogitsRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
SparseSoftmaxCrossEntropyWithLogitsModel,
BuildGradModel(SparseSoftmaxCrossEntropyWithLogitsModel, registry_),
immediate_execution_ctx_.get(), {X.get(), Y.get()}, UseFunction()));
}
TEST_P(CppGradients, TestBiasAddGrad) {
if (UseFunction() && UseMlir()) {
GTEST_SKIP() << "SetAttrString has not been implemented yet.\n";
}
float A_vals[] = {1.0f, 2.0f, 3.0f, 4.0f};
int64_t A_dims[] = {2, 2};
AbstractTensorHandlePtr A;
{
AbstractTensorHandle* A_raw;
status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
immediate_execution_ctx_.get(), A_vals, A_dims, 2, &A_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
A.reset(A_raw);
}
float Bias_vals[] = {2.0f, 3.0f};
int64_t Bias_dims[] = {2};
AbstractTensorHandlePtr Bias;
{
AbstractTensorHandle* Bias_raw;
status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
immediate_execution_ctx_.get(), Bias_vals, Bias_dims, 1, &Bias_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
Bias.reset(Bias_raw);
}
status_ = registry_.Register("BiasAdd", BiasAddRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
BiasAddModel, BuildGradModel(BiasAddModel, registry_),
immediate_execution_ctx_.get(), {A.get(), Bias.get()}, UseFunction()));
}
#ifdef PLATFORM_GOOGLE
INSTANTIATE_TEST_SUITE_P(
UnifiedCAPI, CppGradients,
::testing::Combine(::testing::Values("graphdef", "mlir"),
::testing::Values(false),
::testing::Values(true, false)));
#else
INSTANTIATE_TEST_SUITE_P(
UnifiedCAPI, CppGradients,
::testing::Combine(::testing::Values("graphdef", "mlir"),
::testing::Values(false),
::testing::Values(true, false)));
#endif
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/gradients/nn_grad.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/gradients/nn_grad_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b76f2da2-2c1b-4341-879a-45c8de64721a | cpp | tensorflow/tensorflow | snappy | tensorflow/core/platform/snappy.h | third_party/xla/xla/tsl/lib/io/snappy/snappy_test.cc | #ifndef TENSORFLOW_CORE_PLATFORM_SNAPPY_H_
#define TENSORFLOW_CORE_PLATFORM_SNAPPY_H_
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/snappy.h"
#if !defined(PLATFORM_WINDOWS)
#include <sys/uio.h>
#else
namespace tensorflow {
using tsl::iovec;
}
#endif
namespace tensorflow {
namespace port {
using tsl::port::Snappy_Compress;
using tsl::port::Snappy_CompressFromIOVec;
using tsl::port::Snappy_GetUncompressedLength;
using tsl::port::Snappy_Uncompress;
using tsl::port::Snappy_UncompressToIOVec;
}
}
#endif | #include <memory>
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/lib/io/inputbuffer.h"
#include "xla/tsl/lib/io/random_inputstream.h"
#include "xla/tsl/lib/io/snappy/snappy_inputbuffer.h"
#include "xla/tsl/lib/io/snappy/snappy_inputstream.h"
#include "xla/tsl/lib/io/snappy/snappy_outputbuffer.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
namespace tsl {
static void CheckPrefixSuffix(absl::string_view str, const string& prefix,
const string& suffix) {
CHECK_GE(str.size(), prefix.size());
CHECK_GE(str.size(), suffix.size());
CHECK_EQ(str.substr(0, prefix.length()), prefix);
CHECK_EQ(str.substr(str.length() - suffix.length()), suffix);
}
static string GetRecord() {
static const string lorem_ipsum =
"Lorem ipsum dolor sit amet, consectetur adipiscing elit."
" Fusce vehicula tincidunt libero sit amet ultrices. Vestibulum non "
"felis augue. Duis vitae augue id lectus lacinia congue et ut purus. "
"Donec auctor, nisl at dapibus volutpat, diam ante lacinia dolor, vel"
"dignissim lacus nisi sed purus. Duis fringilla nunc ac lacus sagittis"
" efficitur. Praesent tincidunt egestas eros, eu vehicula urna ultrices"
" et. Aliquam erat volutpat. Maecenas vehicula risus consequat risus"
" dictum, luctus tincidunt nibh imperdiet. Aenean bibendum ac erat"
" cursus scelerisque. Cras lacinia in enim dapibus iaculis. Nunc porta"
" felis lectus, ac tincidunt massa pharetra quis. Fusce feugiat dolor"
" vel ligula rutrum egestas. Donec vulputate quam eros, et commodo"
" purus lobortis sed.";
return lorem_ipsum;
}
static string GenTestString(int copies = 1) {
string result = "";
for (int i = 0; i < copies; i++) {
result += GetRecord();
}
return result;
}
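// Compresses `num_writes` copies of a generated test string into `fname`
// via SnappyOutputBuffer, accumulating the uncompressed payload in
// `expected_result`; optionally produces a corrupted copy of the file.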
absl::Status TestMultipleWritesWriteFile(size_t compress_input_buf_size,
size_t compress_output_buf_size,
int num_writes, bool with_flush,
int num_copies,
bool corrupt_compressed_file,
string& fname, string& data,
string& expected_result) {
Env* env = Env::Default();
fname = testing::TmpDir() + "/snappy_buffers_test";
data = GenTestString(num_copies);
std::unique_ptr<WritableFile> file_writer;
TF_RETURN_IF_ERROR(env->NewWritableFile(fname, &file_writer));
io::SnappyOutputBuffer out(file_writer.get(), compress_input_buf_size,
compress_output_buf_size);
for (int i = 0; i < num_writes; i++) {
TF_RETURN_IF_ERROR(out.Write(absl::string_view(data)));
if (with_flush) {
TF_RETURN_IF_ERROR(out.Flush());
}
strings::StrAppend(&expected_result, data);
}
TF_RETURN_IF_ERROR(out.Flush());
TF_RETURN_IF_ERROR(file_writer->Flush());
TF_RETURN_IF_ERROR(file_writer->Close());
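  // To simulate corruption, copy the compressed file chunk by chunk while
  // holding back one chunk, then write the final chunk with its last byte
  // dropped, truncating the last compressed block.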
if (corrupt_compressed_file) {
string corrupt_fname = testing::TmpDir() + "/snappy_buffers_test_corrupt";
std::unique_ptr<WritableFile> corrupt_file_writer;
TF_RETURN_IF_ERROR(
env->NewWritableFile(corrupt_fname, &corrupt_file_writer));
std::unique_ptr<RandomAccessFile> file_reader;
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(fname, &file_reader));
absl::string_view data;
size_t file_pos = 0;
size_t bytes_to_read = 256;
char* scratch = new char[bytes_to_read];
char* buffer = new char[bytes_to_read];
size_t buffer_size = 0;
while ((file_reader->Read(file_pos, bytes_to_read, &data, scratch)).ok()) {
file_pos += data.size();
TF_CHECK_OK(
corrupt_file_writer->Append(absl::string_view(buffer, buffer_size)));
memcpy(buffer, data.data(), data.size());
buffer_size = data.size();
}
TF_CHECK_OK(corrupt_file_writer->Append(
absl::string_view(buffer, buffer_size - 1)));
TF_CHECK_OK(corrupt_file_writer->Flush());
TF_CHECK_OK(corrupt_file_writer->Close());
delete[] scratch;
delete[] buffer;
fname = corrupt_fname;
}
return absl::OkStatus();
}
absl::Status TestMultipleWrites(size_t compress_input_buf_size,
size_t compress_output_buf_size,
size_t uncompress_input_buf_size,
size_t uncompress_output_buf_size,
int num_writes = 1, bool with_flush = false,
int num_copies = 1,
bool corrupt_compressed_file = false) {
Env* env = Env::Default();
string expected_result;
string fname;
string data;
TF_RETURN_IF_ERROR(TestMultipleWritesWriteFile(
compress_input_buf_size, compress_output_buf_size, num_writes, with_flush,
num_copies, corrupt_compressed_file, fname, data, expected_result));
std::unique_ptr<RandomAccessFile> file_reader;
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(fname, &file_reader));
io::SnappyInputBuffer in(file_reader.get(), uncompress_input_buf_size,
uncompress_output_buf_size);
for (int attempt = 0; attempt < 2; ++attempt) {
string actual_result;
for (int i = 0; i < num_writes; i++) {
tstring decompressed_output;
TF_RETURN_IF_ERROR(in.ReadNBytes(data.size(), &decompressed_output));
strings::StrAppend(&actual_result, decompressed_output);
}
if (actual_result != expected_result) {
return errors::DataLoss("Actual and expected results don't match.");
}
TF_RETURN_IF_ERROR(in.Reset());
}
return absl::OkStatus();
}
absl::Status TestMultipleWritesInputStream(
size_t compress_input_buf_size, size_t compress_output_buf_size,
size_t uncompress_input_buf_size, size_t uncompress_output_buf_size,
int num_writes = 1, bool with_flush = false, int num_copies = 1,
bool corrupt_compressed_file = false) {
Env* env = Env::Default();
string expected_result;
string fname;
string data;
TF_RETURN_IF_ERROR(TestMultipleWritesWriteFile(
compress_input_buf_size, compress_output_buf_size, num_writes, with_flush,
num_copies, corrupt_compressed_file, fname, data, expected_result));
std::unique_ptr<RandomAccessFile> file_reader;
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(fname, &file_reader));
io::RandomAccessInputStream random_input_stream(file_reader.get(), false);
io::SnappyInputStream snappy_input_stream(&random_input_stream,
uncompress_output_buf_size);
for (int attempt = 0; attempt < 2; ++attempt) {
string actual_result;
for (int i = 0; i < num_writes; ++i) {
tstring decompressed_output;
TF_RETURN_IF_ERROR(
snappy_input_stream.ReadNBytes(data.size(), &decompressed_output));
strings::StrAppend(&actual_result, decompressed_output);
}
if (actual_result != expected_result) {
return errors::DataLoss("Actual and expected results don't match.");
}
TF_RETURN_IF_ERROR(snappy_input_stream.Reset());
}
return absl::OkStatus();
}
void TestTellWriteFile(size_t compress_input_buf_size,
size_t compress_output_buf_size,
size_t uncompress_input_buf_size,
size_t uncompress_output_buf_size, int num_copies,
string& fname, string& data) {
Env* env = Env::Default();
fname = testing::TmpDir() + "/snappy_buffers_test";
data = GenTestString(num_copies);
std::unique_ptr<WritableFile> file_writer;
TF_CHECK_OK(env->NewWritableFile(fname, &file_writer));
io::SnappyOutputBuffer out(file_writer.get(), compress_input_buf_size,
compress_output_buf_size);
TF_CHECK_OK(out.Write(absl::string_view(data)));
TF_CHECK_OK(out.Flush());
TF_CHECK_OK(file_writer->Flush());
TF_CHECK_OK(file_writer->Close());
}
void TestTell(size_t compress_input_buf_size, size_t compress_output_buf_size,
size_t uncompress_input_buf_size,
size_t uncompress_output_buf_size, int num_copies = 1) {
Env* env = Env::Default();
string data;
string fname;
TestTellWriteFile(compress_input_buf_size, compress_output_buf_size,
uncompress_input_buf_size, uncompress_output_buf_size,
num_copies, fname, data);
tstring first_half(string(data, 0, data.size() / 2));
tstring bytes_read;
std::unique_ptr<RandomAccessFile> file_reader;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file_reader));
io::SnappyInputBuffer in(file_reader.get(), uncompress_input_buf_size,
uncompress_output_buf_size);
TF_CHECK_OK(in.ReadNBytes(first_half.size(), &bytes_read));
EXPECT_EQ(in.Tell(), first_half.size());
EXPECT_EQ(bytes_read, first_half);
tstring second_half;
TF_CHECK_OK(in.ReadNBytes(data.size() - first_half.size(), &second_half));
EXPECT_EQ(in.Tell(), data.size());
bytes_read.append(second_half);
EXPECT_EQ(bytes_read, data);
}
void TestTellInputStream(size_t compress_input_buf_size,
size_t compress_output_buf_size,
size_t uncompress_input_buf_size,
size_t uncompress_output_buf_size,
int num_copies = 1) {
Env* env = Env::Default();
string data;
string fname;
TestTellWriteFile(compress_input_buf_size, compress_output_buf_size,
uncompress_input_buf_size, uncompress_output_buf_size,
num_copies, fname, data);
tstring first_half(string(data, 0, data.size() / 2));
tstring bytes_read;
std::unique_ptr<RandomAccessFile> file_reader;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file_reader));
io::RandomAccessInputStream random_input_stream(file_reader.get(), false);
io::SnappyInputStream in(&random_input_stream, uncompress_output_buf_size);
TF_CHECK_OK(in.ReadNBytes(first_half.size(), &bytes_read));
EXPECT_EQ(in.Tell(), first_half.size());
EXPECT_EQ(bytes_read, first_half);
tstring second_half;
TF_CHECK_OK(in.ReadNBytes(data.size() - first_half.size(), &second_half));
EXPECT_EQ(in.Tell(), data.size());
bytes_read.append(second_half);
EXPECT_EQ(bytes_read, data);
}
static bool SnappyCompressionSupported() {
string out;
absl::string_view in = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
return port::Snappy_Compress(in.data(), in.size(), &out);
}
TEST(SnappyBuffers, MultipleWritesWithoutFlush) {
if (!SnappyCompressionSupported()) {
fprintf(stderr, "Snappy disabled. Skipping test\n");
return;
}
TF_CHECK_OK(TestMultipleWrites(10000, 10000, 10000, 10000, 2));
TF_CHECK_OK(TestMultipleWritesInputStream(10000, 10000, 10000, 10000, 2));
}
TEST(SnappyBuffers, MultipleWriteCallsWithFlush) {
if (!SnappyCompressionSupported()) {
fprintf(stderr, "skipping compression tests\n");
return;
}
TF_CHECK_OK(TestMultipleWrites(10000, 10000, 10000, 10000, 2, true));
TF_CHECK_OK(
TestMultipleWritesInputStream(10000, 10000, 10000, 10000, 2, true));
}
TEST(SnappyBuffers, SmallUncompressInputBuffer) {
if (!SnappyCompressionSupported()) {
fprintf(stderr, "skipping compression tests\n");
return;
}
absl::Status status = TestMultipleWrites(10000, 10000, 10, 10000, 2, true);
CHECK_EQ(status.code(), error::Code::RESOURCE_EXHAUSTED);
CheckPrefixSuffix(
status.message(),
"Input buffer(size: 10 bytes) too small. Should be larger than ",
" bytes.");
}
TEST(SnappyBuffers, SmallUncompressInputStream) {
if (!SnappyCompressionSupported()) {
fprintf(stderr, "skipping compression tests\n");
return;
}
CHECK_EQ(TestMultipleWritesInputStream(10000, 10000, 10000, 10, 2, true),
errors::ResourceExhausted(
"Output buffer(size: 10 bytes) too small. ",
"Should be larger than ", GetRecord().size(), " bytes."));
}
TEST(SnappyBuffers, CorruptBlock) {
if (!SnappyCompressionSupported()) {
fprintf(stderr, "skipping compression tests\n");
return;
}
absl::Status status =
TestMultipleWrites(10000, 10000, 700, 10000, 2, true, 1, true);
CHECK_EQ(status.code(), error::Code::DATA_LOSS);
CheckPrefixSuffix(status.message(), "Failed to read ",
" bytes from file. Possible data corruption.");
}
TEST(SnappyBuffers, CorruptBlockInputStream) {
if (!SnappyCompressionSupported()) {
fprintf(stderr, "skipping compression tests\n");
return;
}
absl::Status status =
TestMultipleWritesInputStream(10000, 10000, 700, 10000, 2, true, 1, true);
CHECK_EQ(status.code(), error::Code::DATA_LOSS);
CheckPrefixSuffix(status.message(), "Failed to read ",
" bytes from file. Possible data corruption.");
}
TEST(SnappyBuffers, CorruptBlockLargeInputBuffer) {
if (!SnappyCompressionSupported()) {
fprintf(stderr, "skipping compression tests\n");
return;
}
CHECK_EQ(TestMultipleWrites(10000, 10000, 2000, 10000, 2, true, 1, true),
errors::OutOfRange("EOF reached"));
}
TEST(SnappyBuffers, CorruptBlockLargeInputStream) {
if (!SnappyCompressionSupported()) {
fprintf(stderr, "skipping compression tests\n");
return;
}
absl::Status status = TestMultipleWritesInputStream(10000, 10000, 2000, 10000,
2, true, 1, true);
CHECK_EQ(status.code(), error::Code::DATA_LOSS);
CheckPrefixSuffix(status.message(), "Failed to read ",
" bytes from file. Possible data corruption.");
}
TEST(SnappyBuffers, Tell) {
if (!SnappyCompressionSupported()) {
fprintf(stderr, "skipping compression tests\n");
return;
}
TestTell(10000, 10000, 2000, 10000, 2);
}
TEST(SnappyBuffers, TellInputStream) {
if (!SnappyCompressionSupported()) {
fprintf(stderr, "skipping compression tests\n");
return;
}
TestTellInputStream(10000, 10000, 2000, 10000, 2);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/snappy.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/snappy/snappy_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
12fc993c-78e4-40a0-8d2e-c84b4ff0b025 | cpp | google/tensorstore | std_optional | tensorstore/internal/estimate_heap_usage/std_optional.h | tensorstore/internal/json_binding/std_optional_test.cc | #ifndef TENSORSTORE_INTERNAL_ESTIMATE_HEAP_USAGE_STD_OPTIONAL_H_
#define TENSORSTORE_INTERNAL_ESTIMATE_HEAP_USAGE_STD_OPTIONAL_H_
#include <optional>
#include "tensorstore/internal/estimate_heap_usage/estimate_heap_usage.h"
namespace tensorstore {
namespace internal {
template <typename T>
struct HeapUsageEstimator<std::optional<T>> {
static size_t EstimateHeapUsage(const std::optional<T>& v, size_t max_depth) {
if (!v) return 0;
return internal::EstimateHeapUsage(*v, max_depth);
}
static constexpr bool MayUseHeapMemory() {
return internal::MayUseHeapMemory<T>;
}
};
}
}
#endif | #include "tensorstore/internal/json_binding/std_optional.h"
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace jb = tensorstore::internal_json_binding;
namespace {
TEST(JsonBindingTest, Optional) {
tensorstore::TestJsonBinderRoundTrip<std::optional<int>>({
{3, ::nlohmann::json(3)},
{std::nullopt, ::nlohmann::json(::nlohmann::json::value_t::discarded)},
});
}
TEST(JsonBindingTest, OptionalWithNull) {
auto binder = jb::OptionalWithNull();
tensorstore::TestJsonBinderRoundTrip<std::optional<int>>(
{
{3, ::nlohmann::json(3)},
{std::nullopt, ::nlohmann::json(nullptr)},
},
binder);
}
TEST(JsonBindingTest, OptionalExplicitNullopt) {
const auto binder =
jb::Optional(jb::DefaultBinder<>, [] { return "nullopt"; });
tensorstore::TestJsonBinderRoundTrip<std::optional<int>>(
{
{3, 3},
{std::nullopt, "nullopt"},
},
binder);
}
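// Drives the Optional binder directly in both directions on a Result<int>:
// std::true_type loads json into the Result, std::false_type saves it back
// (writing a discarded json for an absent value), and a discarded json on
// load leaves the previously loaded value untouched.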
TEST(JsonBindingTest, OptionalResult) {
::nlohmann::json j;
tensorstore::Result<int> x(absl::UnknownError("x"));
j = 3;
EXPECT_TRUE(jb::Optional()(std::false_type{}, jb::NoOptions{}, &x, &j).ok());
EXPECT_TRUE(j.is_discarded());
j = 4;
EXPECT_TRUE(jb::Optional()(std::true_type{}, jb::NoOptions{}, &x, &j).ok());
EXPECT_TRUE(x.has_value());
EXPECT_EQ(4, x.value());
j = ::nlohmann::json::value_t::discarded;
EXPECT_TRUE(jb::Optional()(std::false_type{}, jb::NoOptions{}, &x, &j).ok());
EXPECT_FALSE(j.is_discarded());
EXPECT_EQ(4, j);
j = ::nlohmann::json::value_t::discarded;
EXPECT_TRUE(jb::Optional()(std::true_type{}, jb::NoOptions{}, &x, &j).ok());
EXPECT_TRUE(x.has_value());
EXPECT_EQ(4, x.value());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/estimate_heap_usage/std_optional.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/std_optional_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
ff3afe7c-f15a-4cb4-a22b-73a9b0ec0931 | cpp | tensorflow/tensorflow | space_to_batch_nd | tensorflow/lite/kernels/space_to_batch_nd.cc | tensorflow/lite/kernels/space_to_batch_nd_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace space_to_batch_nd {
enum KernelType {
kReference,
kGenericOptimized,
};
struct SpaceToBatchNDContext {
SpaceToBatchNDContext(TfLiteContext* context, TfLiteNode* node) {
input = GetInput(context, node, 0);
block_shape = GetInput(context, node, 1);
paddings = GetInput(context, node, 2);
output = GetOutput(context, node, 0);
}
const TfLiteTensor* input;
const TfLiteTensor* block_shape;
const TfLiteTensor* paddings;
TfLiteTensor* output;
};
const int kInputMinDimensionNum = 3;
const int kInputMaxDimensionNum = 4;
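// Computes the output shape: each spatial dimension becomes
// (input_dim + pad_before + pad_after) / block_shape[dim], which must
// divide evenly, and the batch dimension is multiplied by the product of
// the block_shape entries.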
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
SpaceToBatchNDContext* op_context) {
TfLiteIntArray* input_size = op_context->input->dims;
const int32* block_shape = GetTensorData<int32>(op_context->block_shape);
const int32* paddings_data = GetTensorData<int32>(op_context->paddings);
int spatial_dims_num = input_size->size - 2;
TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->block_shape), 1);
TF_LITE_ENSURE_EQ(context, op_context->block_shape->dims->data[0],
spatial_dims_num);
TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->paddings), 2);
TF_LITE_ENSURE_EQ(context, op_context->paddings->dims->data[0],
spatial_dims_num);
TF_LITE_ENSURE_EQ(context, op_context->paddings->dims->data[1], 2);
TfLiteIntArray* output_size = TfLiteIntArrayCopy(input_size);
int output_batch_size = input_size->data[0];
for (int dim = 0; dim < spatial_dims_num; ++dim) {
int final_dim_size = (input_size->data[dim + 1] + paddings_data[dim * 2] +
paddings_data[dim * 2 + 1]);
TF_LITE_ENSURE(context, block_shape[dim] != 0);
TF_LITE_ENSURE_EQ(context, final_dim_size % block_shape[dim], 0);
output_size->data[dim + 1] = final_dim_size / block_shape[dim];
output_batch_size *= block_shape[dim];
}
output_size->data[0] = output_batch_size;
output_size->data[input_size->size - 1] =
input_size->data[input_size->size - 1];
return context->ResizeTensor(context, op_context->output, output_size);
}
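// Checks operand counts, rank bounds, and type agreement. Quantized inputs
// must match the output's scale and zero point (int16 additionally requires
// a zero point of 0); non-constant block_shape or paddings defer output
// resizing to Eval via a dynamic output tensor.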
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
SpaceToBatchNDContext op_context(context, node);
TF_LITE_ENSURE(context,
NumDimensions(op_context.input) >= kInputMinDimensionNum);
TF_LITE_ENSURE(context,
NumDimensions(op_context.input) <= kInputMaxDimensionNum);
TF_LITE_ENSURE_TYPES_EQ(context, op_context.input->type,
op_context.output->type);
if (op_context.input->type == kTfLiteUInt8 ||
op_context.input->type == kTfLiteInt8 ||
op_context.input->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, op_context.input->params.scale,
op_context.output->params.scale);
TF_LITE_ENSURE_EQ(context, op_context.input->params.zero_point,
op_context.output->params.zero_point);
}
if (op_context.input->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, op_context.input->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, op_context.output->params.zero_point, 0);
}
if (!IsConstantOrPersistentTensor(op_context.block_shape) ||
!IsConstantOrPersistentTensor(op_context.paddings)) {
SetTensorToDynamic(op_context.output);
return kTfLiteOk;
}
return ResizeOutputTensor(context, &op_context);
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
SpaceToBatchNDContext op_context(context, node);
if (IsDynamicTensor(op_context.output)) {
TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
}
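// Dispatch to the reference or optimized kernel; the pad value is the
// output zero point for quantized types and 0 otherwise.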
#define TF_LITE_SPACE_TO_BATCH_ND(type, scalar, pad_value) \
tflite::SpaceToBatchParams op_params; \
op_params.output_offset = pad_value; \
type::SpaceToBatchND(op_params, GetTensorShape(op_context.input), \
GetTensorData<scalar>(op_context.input), \
GetTensorShape(op_context.block_shape), \
GetTensorData<int32_t>(op_context.block_shape), \
GetTensorShape(op_context.paddings), \
GetTensorData<int32_t>(op_context.paddings), \
GetTensorShape(op_context.output), \
GetTensorData<scalar>(op_context.output))
switch (op_context.input->type) {
case kTfLiteFloat32:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_BATCH_ND(reference_ops, float, 0);
} else {
TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, float, 0);
}
break;
case kTfLiteUInt8:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_BATCH_ND(reference_ops, uint8_t,
op_context.output->params.zero_point);
} else {
TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, uint8_t,
op_context.output->params.zero_point);
}
break;
case kTfLiteInt8:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_BATCH_ND(reference_ops, int8_t,
op_context.output->params.zero_point);
} else {
TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, int8_t,
op_context.output->params.zero_point);
}
break;
case kTfLiteInt16:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_BATCH_ND(reference_ops, int16_t,
op_context.output->params.zero_point);
} else {
TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, int16_t,
op_context.output->params.zero_point);
}
break;
case kTfLiteInt32:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_BATCH_ND(reference_ops, int32_t, 0);
} else {
TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, int32_t, 0);
}
break;
case kTfLiteInt64:
if (kernel_type == kReference) {
TF_LITE_SPACE_TO_BATCH_ND(reference_ops, int64_t, 0);
} else {
TF_LITE_SPACE_TO_BATCH_ND(optimized_ops, int64_t, 0);
}
break;
default:
TF_LITE_KERNEL_LOG(context,
"Type %d is currently not supported by SpaceToBatch.",
op_context.input->type);
return kTfLiteError;
}
#undef TF_LITE_SPACE_TO_BATCH_ND
return kTfLiteOk;
}
}
TfLiteRegistration* Register_SPACE_TO_BATCH_ND_REF() {
static TfLiteRegistration r = {
nullptr, nullptr, space_to_batch_nd::Prepare,
space_to_batch_nd::Eval<space_to_batch_nd::kReference>};
return &r;
}
TfLiteRegistration* Register_SPACE_TO_BATCH_ND_GENERIC_OPT() {
static TfLiteRegistration r = {
nullptr, nullptr, space_to_batch_nd::Prepare,
space_to_batch_nd::Eval<space_to_batch_nd::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_SPACE_TO_BATCH_ND() {
return Register_SPACE_TO_BATCH_ND_GENERIC_OPT();
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
using ::testing::Matcher;
class SpaceToBatchNDOpModel : public SingleOpModel {
public:
void SetInput(std::initializer_list<float> data) {
PopulateTensor<float>(input_, data);
}
template <typename T>
void SetQuantizedInput(std::initializer_list<float> data) {
QuantizeAndPopulate<T>(input_, data);
}
void SetBlockShape(std::initializer_list<int> data) {
PopulateTensor<int>(block_shape_, data);
}
void SetPaddings(std::initializer_list<int> data) {
PopulateTensor<int>(paddings_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
template <typename T>
std::vector<float> GetDequantizedOutput() {
return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
GetZeroPoint(output_));
}
protected:
int input_;
int block_shape_;
int paddings_;
int output_;
};
class SpaceToBatchNDOpConstModel : public SpaceToBatchNDOpModel {
public:
SpaceToBatchNDOpConstModel(
const TensorData& input, std::initializer_list<int> block_shape,
std::initializer_list<int> paddings, const TensorData& output,
std::initializer_list<int> paddings_dims = {2, 2}) {
input_ = AddInput(input);
block_shape_ = AddConstInput(TensorType_INT32, block_shape,
{static_cast<int>(block_shape.size())});
paddings_ = AddConstInput(TensorType_INT32, paddings, paddings_dims);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_SPACE_TO_BATCH_ND,
BuiltinOptions_SpaceToBatchNDOptions,
CreateSpaceToBatchNDOptions(builder_).Union());
BuildInterpreter({input.shape});
}
};
class SpaceToBatchNDOpDynamicModel : public SpaceToBatchNDOpModel {
public:
SpaceToBatchNDOpDynamicModel(
const TensorData& input, const TensorData& output,
std::initializer_list<int> block_shape_dims = {2},
std::initializer_list<int> paddings_dims = {2, 2}) {
input_ = AddInput(input);
block_shape_ = AddInput(TensorType_INT32);
paddings_ = AddInput(TensorType_INT32);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_SPACE_TO_BATCH_ND,
BuiltinOptions_SpaceToBatchNDOptions,
CreateSpaceToBatchNDOptions(builder_).Union());
BuildInterpreter({input.shape, block_shape_dims, paddings_dims});
}
};
#if GTEST_HAS_DEATH_TEST
TEST(SpaceToBatchNDOpTest, InvalidShapeTest) {
EXPECT_DEATH(
SpaceToBatchNDOpConstModel({TensorType_FLOAT32, {1, 3, 3, 1}}, {2, 2},
{0, 0, 0, 0}, {TensorType_FLOAT32}),
"Cannot allocate tensors");
}
#endif
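// With a 1x4x4x1 input, block_shape {2,2}, and zero padding, the 4x4
// spatial grid is split into four strided 2x2 tiles, one per output batch,
// yielding shape {4,2,2,1}.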
TEST(SpaceToBatchNDOpTest, SimpleConstTest) {
SpaceToBatchNDOpConstModel m({TensorType_FLOAT32, {1, 4, 4, 1}}, {2, 2},
{0, 0, 0, 0}, {TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4, 2, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 9, 11, 2, 4, 10, 12, 5, 7,
13, 15, 6, 8, 14, 16}));
}
TEST(SpaceToBatchNDOpTest, SimpleDynamicTest) {
SpaceToBatchNDOpDynamicModel m({TensorType_FLOAT32, {1, 4, 4, 1}},
{TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
m.SetBlockShape({2, 2});
m.SetPaddings({0, 0, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4, 2, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 9, 11, 2, 4, 10, 12, 5, 7,
13, 15, 6, 8, 14, 16}));
}
TEST(SpaceToBatchNDOpTest, MultipleInputBatchesConstTest) {
SpaceToBatchNDOpConstModel m({TensorType_FLOAT32, {2, 2, 4, 1}}, {2, 2},
{0, 0, 0, 0}, {TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({8, 1, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 9, 11, 2, 4, 10, 12, 5, 7,
13, 15, 6, 8, 14, 16}));
}
TEST(SpaceToBatchNDOpTest, MultipleInputBatchesDynamicTest) {
SpaceToBatchNDOpDynamicModel m({TensorType_FLOAT32, {2, 2, 4, 1}},
{TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
m.SetBlockShape({2, 2});
m.SetPaddings({0, 0, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({8, 1, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 9, 11, 2, 4, 10, 12, 5, 7,
13, 15, 6, 8, 14, 16}));
}
TEST(SpaceToBatchNDOpTest, SimplePaddingConstTest) {
SpaceToBatchNDOpConstModel m({TensorType_FLOAT32, {1, 5, 2, 1}}, {3, 2},
{1, 0, 2, 0}, {TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7,
0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10,
}));
}
TEST(SpaceToBatchNDOpTest, SimplePaddingDynamicTest) {
SpaceToBatchNDOpDynamicModel m({TensorType_FLOAT32, {1, 5, 2, 1}},
{TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.SetBlockShape({3, 2});
m.SetPaddings({1, 0, 2, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7,
0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10,
}));
}
TEST(SpaceToBatchNDOpTest, ComplexPaddingConstTest) {
SpaceToBatchNDOpConstModel m({TensorType_FLOAT32, {1, 4, 2, 1}}, {3, 2},
{1, 1, 2, 4}, {TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 4, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0,
0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0,
0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0,
}));
}
TEST(SpaceToBatchNDOpTest, ComplexPaddingDynamicTest) {
SpaceToBatchNDOpDynamicModel m({TensorType_FLOAT32, {1, 4, 2, 1}},
{TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8});
m.SetBlockShape({3, 2});
m.SetPaddings({1, 1, 2, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 4, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0,
0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0,
0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0,
}));
}
template <typename integer_dtype = int8_t>
std::vector<Matcher<float>> DequantizedArrayNear(
const std::vector<float>& values, const float min, const float max) {
const float quantization_tolerance =
(max - min) / (std::numeric_limits<integer_dtype>::max() -
std::numeric_limits<integer_dtype>::min());
return ArrayFloatNear(values, quantization_tolerance);
}
#if GTEST_HAS_DEATH_TEST
TEST(QuantizedSpaceToBatchNDOpTest, ZeroNotInQuantizationRange) {
EXPECT_DEATH(SpaceToBatchNDOpConstModel m(
{TensorType_UINT8, {1, 2, 2, 1}, 1.0, 2.0}, {4, 2},
{0, 0, 1, 1, 1, 1, 0, 0}, {TensorType_UINT8, {}, 1.0, 2.0}),
".*Check failed: f_min <= 0.*");
}
#endif
template <typename integer_dtype>
void SimplePaddingConstTestQuant() {
const float kMin = -1;
const float kMax =
std::numeric_limits<integer_dtype>::max() /
static_cast<float>(std::numeric_limits<integer_dtype>::max() + 1);
SpaceToBatchNDOpConstModel m(
{GetTensorType<integer_dtype>(), {1, 5, 2, 1}, 1.0f * kMin, 1.0f * kMax},
{3, 2}, {1, 0, 2, 0},
{GetTensorType<integer_dtype>(), {}, 1.0f * kMin, 1.0f * kMax});
m.SetQuantizedInput<integer_dtype>(
{-0.1, 0.2, -0.3, 0.4, -0.5, 0.6, -0.7, 0.8, -0.9, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 2, 1}));
EXPECT_THAT(m.GetDequantizedOutput<integer_dtype>(),
ElementsAreArray(DequantizedArrayNear<integer_dtype>(
{0, 0, 0, -0.5, 0, 0, 0, 0.6, 0, -0.1, 0, -0.7,
0, 0.2, 0, 0.8, 0, -0.3, 0, -0.9, 0, 0.4, 0, 0.1},
-1.0, 1.0)));
}
TEST(QuantizedSpaceToBatchNDOpTest, SimplePaddingConstTestUint8) {
SimplePaddingConstTestQuant<uint8_t>();
}
TEST(QuantizedSpaceToBatchNDOpTest, SimplePaddingConstTestInt8) {
SimplePaddingConstTestQuant<int8_t>();
}
TEST(QuantizedSpaceToBatchNDOpTest, SimplePaddingConstTestInt16) {
SimplePaddingConstTestQuant<int16_t>();
}
template <typename integer_dtype>
void SimplePaddingDynamicTestQuant() {
const float kMin = -1;
const float kMax =
std::numeric_limits<integer_dtype>::max() /
static_cast<float>(std::numeric_limits<integer_dtype>::max() + 1);
SpaceToBatchNDOpDynamicModel m(
{GetTensorType<integer_dtype>(), {1, 5, 2, 1}, 1.0f * kMin, 1.0f * kMax},
{GetTensorType<integer_dtype>(), {}, 1.0f * kMin, 1.0f * kMax});
m.SetQuantizedInput<integer_dtype>(
{-0.1, 0.2, -0.3, 0.4, -0.5, 0.6, -0.7, 0.8, -0.9, 0.1});
m.SetBlockShape({3, 2});
m.SetPaddings({1, 0, 2, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 2, 1}));
EXPECT_THAT(m.GetDequantizedOutput<integer_dtype>(),
ElementsAreArray(DequantizedArrayNear<integer_dtype>(
{0, 0, 0, -0.5, 0, 0, 0, 0.6, 0, -0.1, 0, -0.7,
0, 0.2, 0, 0.8, 0, -0.3, 0, -0.9, 0, 0.4, 0, 0.1},
-1.0, 1.0)));
}
TEST(QuantizedSpaceToBatchNDOpTest, SimplePaddingDynamicTestUint8) {
SimplePaddingDynamicTestQuant<uint8_t>();
}
TEST(QuantizedSpaceToBatchNDOpTest, SimplePaddingDynamicTestInt8) {
SimplePaddingDynamicTestQuant<int8_t>();
}
TEST(QuantizedSpaceToBatchNDOpTest, SimplePaddingDynamicTestInt16) {
SimplePaddingDynamicTestQuant<int16_t>();
}
TEST(QuantizedSpaceToBatchNDOpTest, ComplexPaddingConstTest) {
SpaceToBatchNDOpConstModel m({TensorType_UINT8, {1, 4, 2, 1}, -1.0, 1.0},
{3, 2}, {1, 1, 2, 4},
{TensorType_UINT8, {}, -1.0, 1.0});
m.SetQuantizedInput<uint8_t>({-0.1, 0.2, -0.3, 0.4, -0.5, 0.6, -0.7, 0.8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 4, 1}));
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(DequantizedArrayNear(
{
0, 0, 0, 0, 0, -0.5, 0, 0, 0, 0, 0, 0, 0, 0.6, 0, 0,
0, -0.1, 0, 0, 0, -0.7, 0, 0, 0, 0.2, 0, 0, 0, 0.8, 0, 0,
0, -0.3, 0, 0, 0, 0, 0, 0, 0, 0.4, 0, 0, 0, 0, 0, 0,
},
-1.0, 1.0)));
}
TEST(QuantizedSpaceToBatchNDOpTest, ComplexPaddingDynamicTest) {
SpaceToBatchNDOpDynamicModel m({TensorType_UINT8, {1, 4, 2, 1}, -1.0, 1.0},
{TensorType_UINT8, {}, -1.0, 1.0});
m.SetQuantizedInput<uint8_t>({-0.1, 0.2, -0.3, 0.4, -0.5, 0.6, -0.7, 0.8});
m.SetBlockShape({3, 2});
m.SetPaddings({1, 1, 2, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 4, 1}));
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(DequantizedArrayNear(
{
0, 0, 0, 0, 0, -0.5, 0, 0, 0, 0, 0, 0, 0, 0.6, 0, 0,
0, -0.1, 0, 0, 0, -0.7, 0, 0, 0, 0.2, 0, 0, 0, 0.8, 0, 0,
0, -0.3, 0, 0, 0, 0, 0, 0, 0, 0.4, 0, 0, 0, 0, 0, 0,
},
-1.0, 1.0)));
}
TEST(SpaceToBatchNDOpTest, Simple3DConstTest) {
SpaceToBatchNDOpConstModel m({TensorType_FLOAT32, {1, 4, 4}}, {2}, {0, 0},
{TensorType_FLOAT32}, {1, 2});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 4}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 2, 3, 4, 9, 10, 11, 12, 5, 6,
7, 8, 13, 14, 15, 16}));
}
TEST(SpaceToBatchNDOpTest, Simple3DPaddingConstTest) {
SpaceToBatchNDOpConstModel m({TensorType_FLOAT32, {1, 4, 4}}, {2}, {2, 2},
{TensorType_FLOAT32}, {1, 2});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 4, 4}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({0, 0, 0, 0, 1, 2, 3, 4, 9, 10, 11, 12, 0, 0, 0, 0,
0, 0, 0, 0, 5, 6, 7, 8, 13, 14, 15, 16, 0, 0, 0, 0}));
}
TEST(SpaceToBatchNDOpTest, Simple3DDynamicTest) {
SpaceToBatchNDOpDynamicModel m({TensorType_FLOAT32, {1, 4, 4}},
{TensorType_FLOAT32}, {1}, {1, 2});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
m.SetBlockShape({2});
m.SetPaddings({0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 4}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 2, 3, 4, 9, 10, 11, 12, 5, 6,
7, 8, 13, 14, 15, 16}));
}
TEST(SpaceToBatchNDOpTest, Simple3DPaddingDynamicTest) {
SpaceToBatchNDOpDynamicModel m({TensorType_FLOAT32, {1, 4, 4}},
{TensorType_FLOAT32}, {1}, {1, 2});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
m.SetBlockShape({2});
m.SetPaddings({2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 4, 4}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({0, 0, 0, 0, 1, 2, 3, 4, 9, 10, 11, 12, 0, 0, 0, 0,
0, 0, 0, 0, 5, 6, 7, 8, 13, 14, 15, 16, 0, 0, 0, 0}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/space_to_batch_nd.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/space_to_batch_nd_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
21de3764-d775-4eb2-95c1-d92abb056bcf | cpp | google/quiche | quic_network_blackhole_detector | quiche/quic/core/quic_network_blackhole_detector.cc | quiche/quic/core/quic_network_blackhole_detector_test.cc | #include "quiche/quic/core/quic_network_blackhole_detector.h"
#include <algorithm>
#include "quiche/quic/core/quic_constants.h"
namespace quic {
QuicNetworkBlackholeDetector::QuicNetworkBlackholeDetector(Delegate* delegate,
QuicAlarm* alarm)
: delegate_(delegate), alarm_(*alarm) {}
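// Fires every detection whose deadline equals the earliest outstanding
// deadline, clears those deadlines, notifies the delegate, and re-arms the
// alarm for whatever remains.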
void QuicNetworkBlackholeDetector::OnAlarm() {
QuicTime next_deadline = GetEarliestDeadline();
if (!next_deadline.IsInitialized()) {
QUIC_BUG(quic_bug_10328_1) << "BlackholeDetector alarm fired unexpectedly";
return;
}
QUIC_DVLOG(1) << "BlackholeDetector alarm firing. next_deadline:"
<< next_deadline
<< ", path_degrading_deadline_:" << path_degrading_deadline_
<< ", path_mtu_reduction_deadline_:"
<< path_mtu_reduction_deadline_
<< ", blackhole_deadline_:" << blackhole_deadline_;
if (path_degrading_deadline_ == next_deadline) {
path_degrading_deadline_ = QuicTime::Zero();
delegate_->OnPathDegradingDetected();
}
if (path_mtu_reduction_deadline_ == next_deadline) {
path_mtu_reduction_deadline_ = QuicTime::Zero();
delegate_->OnPathMtuReductionDetected();
}
if (blackhole_deadline_ == next_deadline) {
blackhole_deadline_ = QuicTime::Zero();
delegate_->OnBlackholeDetected();
}
UpdateAlarm();
}
void QuicNetworkBlackholeDetector::StopDetection(bool permanent) {
if (permanent) {
alarm_.PermanentCancel();
} else {
alarm_.Cancel();
}
path_degrading_deadline_ = QuicTime::Zero();
blackhole_deadline_ = QuicTime::Zero();
path_mtu_reduction_deadline_ = QuicTime::Zero();
}
void QuicNetworkBlackholeDetector::RestartDetection(
QuicTime path_degrading_deadline, QuicTime blackhole_deadline,
QuicTime path_mtu_reduction_deadline) {
path_degrading_deadline_ = path_degrading_deadline;
blackhole_deadline_ = blackhole_deadline;
path_mtu_reduction_deadline_ = path_mtu_reduction_deadline;
QUIC_BUG_IF(quic_bug_12708_1, blackhole_deadline_.IsInitialized() &&
blackhole_deadline_ != GetLastDeadline())
<< "Blackhole detection deadline should be the last deadline.";
UpdateAlarm();
}
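// Returns the earliest initialized deadline, or QuicTime::Zero() when no
// detection is armed.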
QuicTime QuicNetworkBlackholeDetector::GetEarliestDeadline() const {
QuicTime result = QuicTime::Zero();
for (QuicTime t : {path_degrading_deadline_, blackhole_deadline_,
path_mtu_reduction_deadline_}) {
if (!t.IsInitialized()) {
continue;
}
if (!result.IsInitialized() || t < result) {
result = t;
}
}
return result;
}
QuicTime QuicNetworkBlackholeDetector::GetLastDeadline() const {
return std::max({path_degrading_deadline_, blackhole_deadline_,
path_mtu_reduction_deadline_});
}
void QuicNetworkBlackholeDetector::UpdateAlarm() const {
if (alarm_.IsPermanentlyCancelled()) {
return;
}
QuicTime next_deadline = GetEarliestDeadline();
QUIC_DVLOG(1) << "Updating alarm. next_deadline:" << next_deadline
<< ", path_degrading_deadline_:" << path_degrading_deadline_
<< ", path_mtu_reduction_deadline_:"
<< path_mtu_reduction_deadline_
<< ", blackhole_deadline_:" << blackhole_deadline_;
alarm_.Update(next_deadline, kAlarmGranularity);
}
bool QuicNetworkBlackholeDetector::IsDetectionInProgress() const {
return alarm_.IsSet();
}
} | #include "quiche/quic/core/quic_network_blackhole_detector.h"
#include "quiche/quic/core/quic_connection_alarms.h"
#include "quiche/quic/core/quic_one_block_arena.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/mock_quic_connection_alarms.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
namespace quic {
namespace test {
class QuicNetworkBlackholeDetectorPeer {
public:
static QuicAlarm* GetAlarm(QuicNetworkBlackholeDetector* detector) {
return &detector->alarm_;
}
};
namespace {
class MockDelegate : public QuicNetworkBlackholeDetector::Delegate {
public:
MOCK_METHOD(void, OnPathDegradingDetected, (), (override));
MOCK_METHOD(void, OnBlackholeDetected, (), (override));
MOCK_METHOD(void, OnPathMtuReductionDetected, (), (override));
};
const size_t kPathDegradingDelayInSeconds = 5;
const size_t kPathMtuReductionDelayInSeconds = 7;
const size_t kBlackholeDelayInSeconds = 10;
class QuicNetworkBlackholeDetectorTest : public QuicTest {
public:
QuicNetworkBlackholeDetectorTest()
: alarms_(&connection_alarms_delegate_, alarm_factory_, arena_),
detector_(&delegate_, &alarms_.network_blackhole_detector_alarm()),
alarm_(static_cast<MockAlarmFactory::TestAlarm*>(
QuicNetworkBlackholeDetectorPeer::GetAlarm(&detector_))),
path_degrading_delay_(
QuicTime::Delta::FromSeconds(kPathDegradingDelayInSeconds)),
path_mtu_reduction_delay_(
QuicTime::Delta::FromSeconds(kPathMtuReductionDelayInSeconds)),
blackhole_delay_(
QuicTime::Delta::FromSeconds(kBlackholeDelayInSeconds)) {
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(1));
ON_CALL(connection_alarms_delegate_, OnNetworkBlackholeDetectorAlarm())
.WillByDefault([&] { detector_.OnAlarm(); });
}
protected:
void RestartDetection() {
detector_.RestartDetection(clock_.Now() + path_degrading_delay_,
clock_.Now() + blackhole_delay_,
clock_.Now() + path_mtu_reduction_delay_);
}
testing::StrictMock<MockDelegate> delegate_;
MockConnectionAlarmsDelegate connection_alarms_delegate_;
QuicConnectionArena arena_;
MockAlarmFactory alarm_factory_;
QuicConnectionAlarms alarms_;
QuicNetworkBlackholeDetector detector_;
MockAlarmFactory::TestAlarm* alarm_;
MockClock clock_;
const QuicTime::Delta path_degrading_delay_;
const QuicTime::Delta path_mtu_reduction_delay_;
const QuicTime::Delta blackhole_delay_;
};
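// Walks the three deadlines in order (path degrading, then path MTU
// reduction, then blackhole) and verifies that the alarm re-arms for the
// next pending deadline after each firing.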
TEST_F(QuicNetworkBlackholeDetectorTest, StartAndFire) {
EXPECT_FALSE(detector_.IsDetectionInProgress());
RestartDetection();
EXPECT_TRUE(detector_.IsDetectionInProgress());
EXPECT_EQ(clock_.Now() + path_degrading_delay_, alarm_->deadline());
clock_.AdvanceTime(path_degrading_delay_);
EXPECT_CALL(delegate_, OnPathDegradingDetected());
alarm_->Fire();
EXPECT_TRUE(detector_.IsDetectionInProgress());
EXPECT_EQ(clock_.Now() + path_mtu_reduction_delay_ - path_degrading_delay_,
alarm_->deadline());
clock_.AdvanceTime(path_mtu_reduction_delay_ - path_degrading_delay_);
EXPECT_CALL(delegate_, OnPathMtuReductionDetected());
alarm_->Fire();
EXPECT_TRUE(detector_.IsDetectionInProgress());
EXPECT_EQ(clock_.Now() + blackhole_delay_ - path_mtu_reduction_delay_,
alarm_->deadline());
clock_.AdvanceTime(blackhole_delay_ - path_mtu_reduction_delay_);
EXPECT_CALL(delegate_, OnBlackholeDetected());
alarm_->Fire();
EXPECT_FALSE(detector_.IsDetectionInProgress());
}
TEST_F(QuicNetworkBlackholeDetectorTest, RestartAndStop) {
RestartDetection();
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(1));
RestartDetection();
EXPECT_EQ(clock_.Now() + path_degrading_delay_, alarm_->deadline());
detector_.StopDetection(false);
EXPECT_FALSE(detector_.IsDetectionInProgress());
}
TEST_F(QuicNetworkBlackholeDetectorTest, PathDegradingFiresAndRestart) {
EXPECT_FALSE(detector_.IsDetectionInProgress());
RestartDetection();
EXPECT_TRUE(detector_.IsDetectionInProgress());
EXPECT_EQ(clock_.Now() + path_degrading_delay_, alarm_->deadline());
clock_.AdvanceTime(path_degrading_delay_);
EXPECT_CALL(delegate_, OnPathDegradingDetected());
alarm_->Fire();
EXPECT_TRUE(detector_.IsDetectionInProgress());
EXPECT_EQ(clock_.Now() + path_mtu_reduction_delay_ - path_degrading_delay_,
alarm_->deadline());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(100));
RestartDetection();
EXPECT_EQ(clock_.Now() + path_degrading_delay_, alarm_->deadline());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_network_blackhole_detector.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_network_blackhole_detector_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
c2ba4f36-53a8-4ce4-bbf5-d1fb8053459d | cpp | tensorflow/tensorflow | rename_attribute | tensorflow/tools/graph_transforms/rename_attribute.cc | tensorflow/tools/graph_transforms/rename_attribute_test.cc | #include "tensorflow/core/common_runtime/constant_folding.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/fold_constants_lib.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
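// Copies the input graph, renaming attribute 'old_attribute_name' to
// 'new_attribute_name' on every node whose op matches 'op_name' ("*", the
// default, matches all ops). Invoked through the graph transform framework,
// e.g.:
//   rename_attribute(old_attribute_name=foo, new_attribute_name=bar)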
Status RenameAttribute(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
if (!context.params.count("old_attribute_name") ||
(context.params.at("old_attribute_name").size() != 1) ||
!context.params.count("new_attribute_name") ||
(context.params.at("new_attribute_name").size() != 1)) {
return errors::InvalidArgument(
"rename_attribute expects exactly one 'old_attribute_name' and one "
"'new_attribute_name' argument, e.g. "
"rename_attribute(old_attribute_name=foo, new_attribute_name=bar)");
}
string op_name;
if (context.params.count("op_name")) {
op_name = context.params.at("op_name")[0];
} else {
op_name = "*";
}
const string old_attribute_name = context.params.at("old_attribute_name")[0];
const string new_attribute_name = context.params.at("new_attribute_name")[0];
output_graph_def->Clear();
for (const NodeDef& node : input_graph_def.node()) {
NodeDef* new_node = output_graph_def->mutable_node()->Add();
*new_node = node;
if (((op_name == "*") || (op_name == node.op())) &&
(node.attr().count(old_attribute_name))) {
AttrValue attribute_value = node.attr().at(old_attribute_name);
new_node->mutable_attr()->erase(old_attribute_name);
new_node->mutable_attr()->insert({new_attribute_name, attribute_value});
}
}
return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("rename_attribute", RenameAttribute);
}
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status RenameAttribute(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
class RenameAttributeTest : public ::testing::Test {
protected:
void TestRenameAttribute() {
GraphDef graph_def;
NodeDef* mul_node1 = graph_def.add_node();
mul_node1->set_name("mul_node1");
mul_node1->set_op("Mul");
mul_node1->add_input("add_node2");
mul_node1->add_input("add_node3");
AddNodeAttr("foo", 23, mul_node1);
AddNodeAttr("bar", "something", mul_node1);
NodeDef* add_node2 = graph_def.add_node();
add_node2->set_name("add_node2");
add_node2->set_op("Add");
add_node2->add_input("const_node1");
add_node2->add_input("const_node2");
AddNodeAttr("foo", 46, add_node2);
AddNodeAttr("bob", 23, add_node2);
AddNodeAttr("bar", "something else", add_node2);
NodeDef* add_node3 = graph_def.add_node();
add_node3->set_name("add_node3");
add_node3->set_op("Add");
add_node3->add_input("const_node1");
add_node3->add_input("const_node3");
NodeDef* const_node1 = graph_def.add_node();
const_node1->set_name("const_node1");
const_node1->set_op("Const");
NodeDef* const_node2 = graph_def.add_node();
const_node2->set_name("const_node2");
const_node2->set_op("Const");
NodeDef* const_node3 = graph_def.add_node();
const_node3->set_name("const_node3");
const_node3->set_op("Const");
NodeDef* add_node4 = graph_def.add_node();
add_node4->set_name("add_node4");
add_node4->set_op("Add");
add_node4->add_input("add_node2");
add_node4->add_input("add_node3");
GraphDef wildcard_result;
TransformFuncContext context;
context.input_names = {};
context.output_names = {"mul_node1"};
context.params.insert(
std::pair<string, std::vector<string>>({"op_name", {string("*")}}));
context.params.insert(std::pair<string, std::vector<string>>(
{"old_attribute_name", {string("foo")}}));
context.params.insert(std::pair<string, std::vector<string>>(
{"new_attribute_name", {string("baz")}}));
TF_ASSERT_OK(RenameAttribute(graph_def, context, &wildcard_result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(wildcard_result, &node_lookup);
EXPECT_EQ(0, node_lookup.at("mul_node1")->attr().count("foo"));
EXPECT_EQ(1, node_lookup.at("mul_node1")->attr().count("baz"));
EXPECT_EQ(1, node_lookup.at("mul_node1")->attr().count("bar"));
EXPECT_EQ(0, node_lookup.at("add_node2")->attr().count("foo"));
EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("baz"));
EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("bar"));
EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("bob"));
GraphDef targeted_result;
TransformFuncContext targeted_context;
targeted_context.input_names = {};
targeted_context.output_names = {"mul_node1"};
targeted_context.params.insert(
std::pair<string, std::vector<string>>({"op_name", {string("Mul")}}));
targeted_context.params.insert(std::pair<string, std::vector<string>>(
{"old_attribute_name", {string("foo")}}));
targeted_context.params.insert(std::pair<string, std::vector<string>>(
{"new_attribute_name", {string("baz")}}));
TF_ASSERT_OK(
RenameAttribute(graph_def, targeted_context, &targeted_result));
MapNamesToNodes(targeted_result, &node_lookup);
EXPECT_EQ(0, node_lookup.at("mul_node1")->attr().count("foo"));
EXPECT_EQ(1, node_lookup.at("mul_node1")->attr().count("baz"));
EXPECT_EQ(1, node_lookup.at("mul_node1")->attr().count("bar"));
EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("foo"));
EXPECT_EQ(0, node_lookup.at("add_node2")->attr().count("baz"));
EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("bar"));
EXPECT_EQ(1, node_lookup.at("add_node2")->attr().count("bob"));
}
};
TEST_F(RenameAttributeTest, TestRenameAttribute) { TestRenameAttribute(); }
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/rename_attribute.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/rename_attribute_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
60f7a046-9b9b-48c8-8cb5-20c774aecc0b | cpp | google/tensorstore | cache | tensorstore/internal/cache/cache.cc | tensorstore/internal/cache/cache_test.cc | #include "tensorstore/internal/cache/cache.h"
#include <stddef.h>
#include <stdint.h>
#include <array>
#include <atomic>
#include <bitset>
#include <cassert>
#include <memory>
#include <mutex>
#include <string>
#include <string_view>
#include <type_traits>
#include <typeinfo>
#include <utility>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/base/optimization.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/internal/cache/cache_pool_limits.h"
#include "tensorstore/internal/container/intrusive_linked_list.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/metadata.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/internal/type_traits.h"
using ::tensorstore::internal_metrics::MetricMetadata;
namespace tensorstore {
namespace internal_cache {
auto& hit_count = internal_metrics::Counter<int64_t>::New(
"/tensorstore/cache/hit_count", MetricMetadata("Number of cache hits."));
auto& miss_count = internal_metrics::Counter<int64_t>::New(
"/tensorstore/cache/miss_count", MetricMetadata("Number of cache misses."));
auto& evict_count = internal_metrics::Counter<int64_t>::New(
"/tensorstore/cache/evict_count",
MetricMetadata("Number of evictions from the cache."));
using ::tensorstore::internal::PinnedCacheEntry;
#if !defined(NDEBUG)
inline void DebugAssertMutexHeld(absl::Mutex* mutex) { mutex->AssertHeld(); }
#else
inline void DebugAssertMutexHeld(absl::Mutex* mutex) {}
#endif
using LruListAccessor =
internal::intrusive_linked_list::MemberAccessor<LruListNode>;
CachePoolImpl::CachePoolImpl(const CachePool::Limits& limits)
: limits_(limits),
total_bytes_(0),
strong_references_(1),
weak_references_(1) {
Initialize(LruListAccessor{}, &eviction_queue_);
}
namespace {
inline void AcquireWeakReference(CachePoolImpl* p) {
[[maybe_unused]] auto old_count =
p->weak_references_.fetch_add(1, std::memory_order_relaxed);
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CachePool:weak:increment", p,
old_count + 1);
}
void ReleaseWeakReference(CachePoolImpl* p) {
auto new_count = --p->weak_references_;
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CachePool:weak:decrement", p,
new_count);
if (new_count == 0) {
delete Access::StaticCast<CachePool>(p);
}
}
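// Decrements a cache's reference count by `amount` and records whether the
// transition crossed the deletion threshold or, for strong-reference
// decrements, dropped the last reason to hold the pool's weak reference.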
struct DecrementCacheReferenceCount {
explicit DecrementCacheReferenceCount(CacheImpl* cache_impl, size_t amount) {
old_count = cache_impl->reference_count_.fetch_sub(
amount, std::memory_order_acq_rel);
new_count = old_count - amount;
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("Cache:decrement", cache_impl,
new_count);
}
bool should_delete() const {
return !CacheImpl::ShouldDelete(old_count) &&
CacheImpl::ShouldDelete(new_count);
}
bool should_release_cache_pool_weak_reference() const {
assert(old_count - new_count == CacheImpl::kStrongReferenceIncrement);
return !CacheImpl::ShouldHoldPoolWeakReference(new_count);
}
size_t old_count, new_count;
};
void UnlinkListNode(LruListNode* node) noexcept {
Remove(LruListAccessor{}, node);
Initialize(LruListAccessor{}, node);
}
void UnregisterEntryFromPool(CacheEntryImpl* entry,
CachePoolImpl* pool) noexcept {
DebugAssertMutexHeld(&pool->lru_mutex_);
UnlinkListNode(entry);
pool->total_bytes_.fetch_sub(entry->num_bytes_, std::memory_order_relaxed);
}
void AddToEvictionQueue(CachePoolImpl* pool, CacheEntryImpl* entry) noexcept {
DebugAssertMutexHeld(&pool->lru_mutex_);
auto* eviction_queue = &pool->eviction_queue_;
if (!OnlyContainsNode(LruListAccessor{}, entry)) {
Remove(LruListAccessor{}, entry);
}
InsertBefore(LruListAccessor{}, eviction_queue, entry);
}
void DestroyCache(CachePoolImpl* pool, CacheImpl* cache);
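// Evicts least-recently-used entries while the pool is over its byte limit.
// Evicted entries are gathered in fixed-size batches and destroyed with
// lru_mutex_ temporarily released, so entry destructors run outside the
// lock.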
void MaybeEvictEntries(CachePoolImpl* pool) noexcept {
DebugAssertMutexHeld(&pool->lru_mutex_);
constexpr size_t kBufferSize = 64;
std::array<CacheEntryImpl*, kBufferSize> entries_to_delete;
std::bitset<kBufferSize> should_delete_cache_for_entry;
size_t num_entries_to_delete = 0;
const auto destroy_entries = [&] {
internal::ScopedWriterUnlock unlock(pool->lru_mutex_);
for (size_t i = 0; i < num_entries_to_delete; ++i) {
auto* entry = entries_to_delete[i];
if (should_delete_cache_for_entry[i]) {
DestroyCache(entry->cache_->pool_, entry->cache_);
}
entry->cache_ = nullptr;
delete Access::StaticCast<CacheEntry>(entry);
}
};
while (pool->total_bytes_.load(std::memory_order_acquire) >
pool->limits_.total_bytes_limit) {
auto* queue = &pool->eviction_queue_;
if (queue->next == queue) {
break;
}
auto* entry = static_cast<CacheEntryImpl*>(queue->next);
auto* cache = entry->cache_;
bool evict = false;
bool should_delete_cache = false;
auto& shard = cache->ShardForKey(entry->key_);
if (absl::MutexLock lock(&shard.mutex);
entry->reference_count_.load(std::memory_order_acquire) == 0) {
[[maybe_unused]] size_t erase_count = shard.entries.erase(entry);
assert(erase_count == 1);
if (shard.entries.empty()) {
if (DecrementCacheReferenceCount(cache,
CacheImpl::kNonEmptyShardIncrement)
.should_delete()) {
should_delete_cache = true;
}
}
evict = true;
}
if (!evict) {
UnlinkListNode(entry);
continue;
}
UnregisterEntryFromPool(entry, pool);
evict_count.Increment();
should_delete_cache_for_entry[num_entries_to_delete] = should_delete_cache;
entries_to_delete[num_entries_to_delete++] = entry;
if (num_entries_to_delete == entries_to_delete.size()) {
destroy_entries();
num_entries_to_delete = 0;
}
}
destroy_entries();
}
void InitializeNewEntry(CacheEntryImpl* entry, CacheImpl* cache) noexcept {
entry->cache_ = cache;
entry->reference_count_.store(2, std::memory_order_relaxed);
entry->num_bytes_ = 0;
Initialize(LruListAccessor{}, entry);
}
void DestroyCache(CachePoolImpl* pool,
CacheImpl* cache) ABSL_NO_THREAD_SAFETY_ANALYSIS {
if (pool) {
if (!cache->cache_identifier_.empty()) {
absl::MutexLock lock(&pool->caches_mutex_);
auto it = pool->caches_.find(cache);
if (it != pool->caches_.end() && *it == cache) {
pool->caches_.erase(it);
}
}
if (HasLruCache(pool)) {
absl::MutexLock lru_lock(&pool->lru_mutex_);
for (auto& shard : cache->shards_) {
absl::MutexLock lock(&shard.mutex);
for (CacheEntryImpl* entry : shard.entries) {
entry->reference_count_.fetch_add(2, std::memory_order_acq_rel);
UnregisterEntryFromPool(entry, pool);
}
}
} else {
for (auto& shard : cache->shards_) {
absl::MutexLock lock(&shard.mutex);
for (CacheEntryImpl* entry : shard.entries) {
entry->reference_count_.fetch_add(2, std::memory_order_acq_rel);
}
}
}
for (auto& shard : cache->shards_) {
for (CacheEntryImpl* entry : shard.entries) {
assert(entry->reference_count_.load() >= 2 &&
entry->reference_count_.load() <= 3);
delete Access::StaticCast<Cache::Entry>(entry);
}
}
}
delete Access::StaticCast<Cache>(cache);
}
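// Decrements `reference_count` by `decrease_amount`. While the count stays
// above `lock_threshold` the decrement is lock-free and an empty lock is
// returned; otherwise `mutex_fn()` is locked first and the held lock is
// returned so the caller can finish cleanup under it.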
template <typename T, typename LockFn>
inline UniqueWriterLock<absl::Mutex> DecrementReferenceCountWithLock(
std::atomic<T>& reference_count, LockFn mutex_fn, T& new_count,
internal::type_identity_t<T> decrease_amount,
internal::type_identity_t<T> lock_threshold) {
static_assert(std::is_invocable_v<LockFn>);
static_assert(std::is_same_v<absl::Mutex&, std::invoke_result_t<LockFn>>);
{
auto count = reference_count.load(std::memory_order_relaxed);
while (true) {
if (count <= lock_threshold + decrease_amount) break;
if (reference_count.compare_exchange_weak(count, count - decrease_amount,
std::memory_order_acq_rel)) {
new_count = count - decrease_amount;
return {};
}
}
}
UniqueWriterLock lock(mutex_fn());
auto count =
reference_count.fetch_sub(decrease_amount, std::memory_order_acq_rel) -
decrease_amount;
new_count = count;
if (count > lock_threshold) {
return {};
}
return lock;
}
}
void StrongPtrTraitsCacheEntry::decrement_impl(
CacheEntryImpl* entry_impl) noexcept ABSL_NO_THREAD_SAFETY_ANALYSIS {
auto* cache = entry_impl->cache_;
uint32_t new_count;
if (auto* pool_impl = cache->pool_) {
if (pool_impl->limits_.total_bytes_limit == 0) {
CacheImpl::Shard* shard = nullptr;
auto lock = DecrementReferenceCountWithLock(
entry_impl->reference_count_,
[&]() -> absl::Mutex& {
shard = &cache->ShardForKey(entry_impl->key_);
return shard->mutex;
},
new_count,
2, 1);
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CacheEntry:decrement",
entry_impl, new_count);
if (!lock) return;
if (new_count == 0) {
shard->entries.erase(entry_impl);
if (shard->entries.empty()) {
cache->reference_count_.fetch_sub(CacheImpl::kNonEmptyShardIncrement,
std::memory_order_relaxed);
}
delete entry_impl;
}
} else {
auto lock = DecrementReferenceCountWithLock(
entry_impl->reference_count_,
[pool_impl]() -> absl::Mutex& { return pool_impl->lru_mutex_; },
new_count,
2, 1);
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CacheEntry:decrement",
entry_impl, new_count);
if (!lock) return;
if (new_count == 0) {
AddToEvictionQueue(pool_impl, entry_impl);
MaybeEvictEntries(pool_impl);
}
}
assert(new_count <= 1);
} else {
new_count =
entry_impl->reference_count_.fetch_sub(2, std::memory_order_acq_rel) -
2;
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CacheEntry:decrement",
entry_impl, new_count);
if (new_count > 1) return;
delete entry_impl;
}
StrongPtrTraitsCache::decrement(Access::StaticCast<Cache>(cache));
}
inline bool TryToAcquireCacheStrongReference(CachePoolImpl* pool,
CacheImpl* cache_impl) {
auto old_count = cache_impl->reference_count_.load(std::memory_order_relaxed);
while (true) {
if (CacheImpl::ShouldDelete(old_count)) {
return false;
}
if (cache_impl->reference_count_.compare_exchange_weak(
old_count, old_count + CacheImpl::kStrongReferenceIncrement,
std::memory_order_acq_rel)) {
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT(
"Cache:increment", cache_impl,
old_count + CacheImpl::kStrongReferenceIncrement);
if (!CacheImpl::ShouldHoldPoolWeakReference(old_count)) {
AcquireWeakReference(pool);
}
return true;
}
}
}
CachePtr<Cache> GetCacheInternal(
CachePoolImpl* pool, const std::type_info& cache_type,
std::string_view cache_key,
absl::FunctionRef<std::unique_ptr<Cache>()> make_cache) {
CachePoolImpl::CacheKey key(cache_type, cache_key);
if (pool && !cache_key.empty()) {
absl::MutexLock lock(&pool->caches_mutex_);
auto it = pool->caches_.find(key);
if (it != pool->caches_.end()) {
auto* cache = *it;
if (!TryToAcquireCacheStrongReference(pool, cache)) {
pool->caches_.erase(it);
} else {
return CachePtr<Cache>(Access::StaticCast<Cache>(cache),
internal::adopt_object_ref);
}
}
}
std::unique_ptr<Cache> new_cache = make_cache();
if (!new_cache) return CachePtr<Cache>();
auto* cache_impl = Access::StaticCast<CacheImpl>(new_cache.get());
cache_impl->pool_ = pool;
if (!pool || cache_key.empty()) {
if (pool) {
AcquireWeakReference(pool);
}
new_cache.release();
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT(
"Cache:increment", cache_impl, CacheImpl::kStrongReferenceIncrement);
cache_impl->reference_count_.store(CacheImpl::kStrongReferenceIncrement,
std::memory_order_relaxed);
return CachePtr<Cache>(Access::StaticCast<Cache>(cache_impl),
internal::adopt_object_ref);
}
cache_impl->cache_type_ = &cache_type;
cache_impl->cache_identifier_ = std::string(cache_key);
absl::MutexLock lock(&pool->caches_mutex_);
auto insert_result = pool->caches_.insert(cache_impl);
if (insert_result.second ||
!TryToAcquireCacheStrongReference(pool, *insert_result.first)) {
if (!insert_result.second) {
const_cast<CacheImpl*&>(*insert_result.first) = cache_impl;
}
new_cache.release();
size_t initial_count = CacheImpl::kStrongReferenceIncrement;
if (pool->strong_references_.load(std::memory_order_relaxed) != 0) {
initial_count += CacheImpl::kCachePoolStrongReferenceIncrement;
}
cache_impl->reference_count_.store(initial_count,
std::memory_order_relaxed);
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("Cache:increment", cache_impl,
initial_count);
AcquireWeakReference(pool);
}
return CachePtr<Cache>(Access::StaticCast<Cache>(*insert_result.first),
internal::adopt_object_ref);
}
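// Returns the entry for `key`. Pool-backed caches consult the per-shard
// hash table, counting hits and misses; pool-less caches always allocate a
// fresh entry. DoInitialize runs exactly once per entry via absl::call_once,
// and for pools with an LRU byte limit the entry's initial size is
// registered with the pool.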
PinnedCacheEntry<Cache> GetCacheEntryInternal(internal::Cache* cache,
std::string_view key) {
auto* cache_impl = Access::StaticCast<CacheImpl>(cache);
PinnedCacheEntry<Cache> returned_entry;
if (!cache_impl->pool_) {
std::string temp_key(key);
auto* entry_impl =
Access::StaticCast<CacheEntryImpl>(cache->DoAllocateEntry());
entry_impl->key_ = std::move(temp_key);
InitializeNewEntry(entry_impl, cache_impl);
StrongPtrTraitsCache::increment(cache);
returned_entry = PinnedCacheEntry<Cache>(
Access::StaticCast<CacheEntry>(entry_impl), internal::adopt_object_ref);
} else {
auto& shard = cache_impl->ShardForKey(key);
absl::MutexLock lock(&shard.mutex);
auto it = shard.entries.find(key);
if (it != shard.entries.end()) {
hit_count.Increment();
auto* entry_impl = *it;
auto old_count =
entry_impl->reference_count_.fetch_add(2, std::memory_order_acq_rel);
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CacheEntry:increment",
entry_impl, old_count + 2);
if (old_count <= 1) {
StrongPtrTraitsCache::increment(cache);
}
returned_entry =
PinnedCacheEntry<Cache>(Access::StaticCast<Cache::Entry>(entry_impl),
internal::adopt_object_ref);
} else {
miss_count.Increment();
std::string temp_key(key);
auto* entry_impl =
Access::StaticCast<CacheEntryImpl>(cache->DoAllocateEntry());
entry_impl->key_ = std::move(temp_key);
InitializeNewEntry(entry_impl, cache_impl);
std::unique_ptr<CacheEntry> entry(
Access::StaticCast<CacheEntry>(entry_impl));
[[maybe_unused]] auto inserted = shard.entries.insert(entry_impl).second;
assert(inserted);
if (shard.entries.size() == 1) {
cache_impl->reference_count_.fetch_add(
CacheImpl::kNonEmptyShardIncrement, std::memory_order_relaxed);
}
StrongPtrTraitsCache::increment(cache);
returned_entry =
PinnedCacheEntry<Cache>(entry.release(), internal::adopt_object_ref);
}
}
auto* entry_impl = Access::StaticCast<CacheEntryImpl>(returned_entry.get());
absl::call_once(entry_impl->initialized_, [&] {
returned_entry->DoInitialize();
if (HasLruCache(cache_impl->pool_)) {
size_t new_size = entry_impl->num_bytes_ =
cache->DoGetSizeInBytes(returned_entry.get());
UpdateTotalBytes(*cache_impl->pool_, new_size);
}
});
return returned_entry;
}
void StrongPtrTraitsCache::decrement_impl(CacheImpl* cache) noexcept {
auto decrement_result =
DecrementCacheReferenceCount(cache, CacheImpl::kStrongReferenceIncrement);
CachePoolImpl* pool = nullptr;
if (decrement_result.should_release_cache_pool_weak_reference()) {
pool = cache->pool_;
}
if (decrement_result.should_delete()) {
DestroyCache(cache->pool_, cache);
}
if (pool) {
ReleaseWeakReference(pool);
}
}
CacheImpl::CacheImpl() : pool_(nullptr), reference_count_(0) {}
CacheImpl::~CacheImpl() = default;
void StrongPtrTraitsCachePool::increment(CachePool* p) noexcept {
auto* pool = Access::StaticCast<CachePoolImpl>(p);
if (pool->strong_references_.fetch_add(1, std::memory_order_acq_rel) == 0) {
AcquireWeakReference(Access::StaticCast<CachePoolImpl>(p));
}
}
void StrongPtrTraitsCachePool::decrement(CachePool* p) noexcept {
auto* pool = Access::StaticCast<CachePoolImpl>(p);
size_t new_count;
auto lock = DecrementReferenceCountWithLock(
pool->strong_references_,
[pool]() -> absl::Mutex& { return pool->caches_mutex_; }, new_count,
1, 0);
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CachePool:decrement", p,
new_count);
if (!lock) return;
std::vector<CacheImpl*> caches;
caches.reserve(pool->caches_.size());
for (auto* cache : pool->caches_) {
if (DecrementCacheReferenceCount(
cache, CacheImpl::kCachePoolStrongReferenceIncrement)
.should_delete()) {
caches.push_back(cache);
}
}
lock.unlock();
for (auto* cache : caches) {
DestroyCache(pool, cache);
}
ReleaseWeakReference(pool);
}
void WeakPtrTraitsCachePool::increment(CachePool* p) noexcept {
AcquireWeakReference(Access::StaticCast<CachePoolImpl>(p));
}
void WeakPtrTraitsCachePool::decrement(CachePool* p) noexcept {
ReleaseWeakReference(Access::StaticCast<CachePoolImpl>(p));
}
void intrusive_ptr_decrement(CacheEntryWeakState* p)
ABSL_NO_THREAD_SAFETY_ANALYSIS {
size_t new_weak_count;
auto weak_lock = DecrementReferenceCountWithLock(
p->weak_references, [p]() -> absl::Mutex& { return p->mutex; },
new_weak_count,
1, 0);
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CacheEntryWeakState:decrement", p,
new_weak_count);
if (!weak_lock) return;
auto* entry = p->entry;
if (!entry) {
weak_lock = {};
delete p;
return;
}
uint32_t new_count;
auto* cache = entry->cache_;
auto* pool = cache->pool_;
ABSL_ASSUME(pool);
if (!HasLruCache(pool)) {
CacheImpl::Shard* shard = nullptr;
auto entries_lock = DecrementReferenceCountWithLock(
entry->reference_count_,
[&]() -> absl::Mutex& {
shard = &cache->ShardForKey(entry->key_);
return shard->mutex;
},
new_count,
1, 0);
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CacheEntry:decrement", entry,
new_count);
weak_lock = {};
if (!entries_lock) return;
[[maybe_unused]] size_t erase_count = shard->entries.erase(entry);
assert(erase_count == 1);
bool should_delete_cache = false;
if (shard->entries.empty()) {
if (DecrementCacheReferenceCount(cache,
CacheImpl::kNonEmptyShardIncrement)
.should_delete()) {
should_delete_cache = true;
}
}
entries_lock = {};
delete Access::StaticCast<CacheEntry>(entry);
if (should_delete_cache) {
DestroyCache(pool, cache);
}
return;
}
auto pool_lock = DecrementReferenceCountWithLock(
entry->reference_count_,
[pool]() -> absl::Mutex& { return pool->lru_mutex_; }, new_count,
1,
0);
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CacheEntry:decrement", entry,
new_count);
if (!pool_lock) return;
weak_lock = {};
AddToEvictionQueue(pool, entry);
MaybeEvictEntries(pool);
}
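// Lazily creates the entry's weak state on first use. Taking a weak
// reference when none was outstanding also adds one unit to the entry's
// reference count (strong pins add two), keeping the entry alive while weak
// pointers exist.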
internal::IntrusivePtr<CacheEntryWeakState> AcquireWeakCacheEntryReference(
CacheEntryImpl* entry_impl) {
CacheEntryWeakState* weak_state =
entry_impl->weak_state_.load(std::memory_order_acquire);
if (!weak_state) {
if (!entry_impl->cache_->pool_) {
return {};
}
auto* new_weak_state = new CacheEntryWeakState;
new_weak_state->entry = entry_impl;
new_weak_state->weak_references.store(1, std::memory_order_relaxed);
if (entry_impl->weak_state_.compare_exchange_strong(
weak_state, new_weak_state, std::memory_order_acq_rel)) {
entry_impl->reference_count_.fetch_add(1, std::memory_order_relaxed);
return internal::IntrusivePtr<CacheEntryWeakState>(
new_weak_state, internal::adopt_object_ref);
} else {
delete new_weak_state;
}
}
if (weak_state->weak_references.fetch_add(1, std::memory_order_acq_rel) ==
0) {
entry_impl->reference_count_.fetch_add(1, std::memory_order_relaxed);
}
return internal::IntrusivePtr<CacheEntryWeakState>(
weak_state, internal::adopt_object_ref);
}
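// Adjusts the pool's byte accounting; only growth that pushes the total
// past the configured limit triggers an eviction pass.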
void UpdateTotalBytes(CachePoolImpl& pool, ptrdiff_t change) {
assert(HasLruCache(&pool));
if (pool.total_bytes_.fetch_add(change, std::memory_order_acq_rel) + change <=
pool.limits_.total_bytes_limit ||
change <= 0) {
return;
}
absl::MutexLock lock(&pool.lru_mutex_);
MaybeEvictEntries(&pool);
}
}
namespace internal {
Cache::Cache() = default;
Cache::~Cache() = default;
size_t Cache::DoGetSizeInBytes(Cache::Entry* entry) {
return ((internal_cache::CacheEntryImpl*)entry)->key_.capacity() +
this->DoGetSizeofEntry();
}
CacheEntry::~CacheEntry() {
auto* weak_state = this->weak_state_.load(std::memory_order_relaxed);
if (!weak_state) return;
{
absl::MutexLock lock(&weak_state->mutex);
weak_state->entry = nullptr;
if (weak_state->weak_references.load(std::memory_order_acquire) != 0) {
return;
}
}
delete weak_state;
}
void CacheEntry::DoInitialize() {}
void CacheEntry::WriterLock() { mutex_.WriterLock(); }
void CacheEntry::WriterUnlock() {
UniqueWriterLock lock(mutex_, std::adopt_lock);
auto flags = std::exchange(flags_, 0);
if (!flags) return;
assert(flags & kSizeChanged);
auto& cache = GetOwningCache(*this);
auto* pool = cache.pool();
auto* pool_impl =
internal_cache::Access::StaticCast<internal_cache::CachePoolImpl>(pool);
if (!internal_cache::HasLruCache(pool_impl)) return;
const size_t new_size = cache.DoGetSizeInBytes(this);
ptrdiff_t change = new_size - std::exchange(num_bytes_, new_size);
lock.unlock();
internal_cache::UpdateTotalBytes(*pool_impl, change);
}
CachePool::StrongPtr CachePool::Make(const CachePool::Limits& cache_limits) {
CachePool::StrongPtr pool;
internal_cache::Access::StaticCast<internal_cache::CachePoolStrongPtr>(&pool)
->reset(new internal_cache::CachePool(cache_limits), adopt_object_ref);
return pool;
}
CachePool::StrongPtr::StrongPtr(const CachePool::WeakPtr& ptr)
: Base(ptr.get(), adopt_object_ref) {
if (!ptr) return;
auto* pool =
internal_cache::Access::StaticCast<internal_cache::CachePoolImpl>(
ptr.get());
absl::MutexLock lock(&pool->caches_mutex_);
if (pool->strong_references_.fetch_add(1, std::memory_order_acq_rel) == 0) {
internal_cache::AcquireWeakReference(pool);
for (auto* cache : pool->caches_) {
cache->reference_count_.fetch_add(
internal_cache::CacheImpl::kCachePoolStrongReferenceIncrement,
std::memory_order_acq_rel);
}
}
}
}
} | #include "tensorstore/internal/cache/cache.h"
#include <stddef.h>
#include <atomic>
#include <deque>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_set.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/internal/testing/concurrent.h"
namespace {
using ::tensorstore::UniqueWriterLock;
using ::tensorstore::internal::Cache;
using ::tensorstore::internal::CachePool;
using ::tensorstore::internal::CachePtr;
using ::tensorstore::internal::GetCache;
using ::tensorstore::internal::PinnedCacheEntry;
using ::tensorstore::internal::WeakPinnedCacheEntry;
using ::tensorstore::internal_cache::Access;
using ::tensorstore::internal_cache::CacheEntryImpl;
using ::tensorstore::internal_cache::CacheImpl;
using ::tensorstore::internal_cache::CachePoolImpl;
using ::tensorstore::internal_cache::LruListNode;
using ::tensorstore::internal_testing::TestConcurrent;
using ::testing::ElementsAre;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
constexpr CachePool::Limits kSmallCacheLimits{10000000};
CachePoolImpl* GetPoolImpl(const CachePool::StrongPtr& ptr) {
return Access::StaticCast<CachePoolImpl>(ptr.get());
}
CachePoolImpl* GetPoolImpl(const CachePool::WeakPtr& ptr) {
return Access::StaticCast<CachePoolImpl>(ptr.get());
}
class TestCache : public Cache {
public:
struct RequestLog {
absl::Mutex mutex;
std::deque<std::string> entry_allocate_log;
std::deque<std::pair<std::string, std::string>> entry_destroy_log;
std::deque<std::string> cache_allocate_log;
std::deque<std::string> cache_destroy_log;
};
class Entry : public Cache::Entry {
public:
using OwningCache = TestCache;
std::string data;
size_t size = 1;
void ChangeSize(size_t new_size) {
UniqueWriterLock<Cache::Entry> lock(*this);
size = new_size;
NotifySizeChanged();
}
~Entry() override {
if (log_) {
absl::MutexLock lock(&log_->mutex);
log_->entry_destroy_log.emplace_back(cache_identifier_,
std::string(this->key()));
}
}
WeakPinnedCacheEntry weak_ref;
std::shared_ptr<RequestLog> log_;
std::string cache_identifier_;
};
explicit TestCache(std::shared_ptr<RequestLog> log = {}) : log_(log) {}
~TestCache() {
if (log_) {
absl::MutexLock lock(&log_->mutex);
log_->cache_destroy_log.emplace_back(cache_identifier());
}
}
size_t DoGetSizeofEntry() override { return sizeof(Entry); }
Entry* DoAllocateEntry() override {
if (log_) {
absl::MutexLock lock(&log_->mutex);
log_->entry_allocate_log.emplace_back(cache_identifier());
}
auto* entry = new Entry;
entry->cache_identifier_ = cache_identifier();
entry->log_ = log_;
return entry;
}
void OnDelete(Entry* entry) {}
size_t DoGetSizeInBytes(Cache::Entry* base_entry) override {
auto* entry = static_cast<Entry*>(base_entry);
return entry->size;
}
std::shared_ptr<RequestLog> log_;
};
class TestCacheWithCachePool : public TestCache {
public:
using TestCache::TestCache;
CachePool::WeakPtr cache_pool;
};
using EntryIdentifier = std::pair<std::string, void*>;
std::pair<std::string, void*> GetEntryIdentifier(CacheEntryImpl* entry) {
return {entry->key_, entry};
}
absl::flat_hash_set<EntryIdentifier> GetEntrySet(LruListNode* head) {
absl::flat_hash_set<EntryIdentifier> entries;
for (LruListNode* node = head->next; node != head; node = node->next) {
entries.emplace(
GetEntryIdentifier(Access::StaticCast<CacheEntryImpl>(node)));
}
return entries;
}
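// Cross-checks the pool's internal bookkeeping against the caches the test
// expects to be alive: registered-cache membership, weak-reference count,
// per-entry byte accounting, and presence of unpinned entries in the
// eviction queue.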
void AssertInvariants(const CachePool::StrongPtr& pool,
absl::flat_hash_set<Cache*> expected_caches)
ABSL_NO_THREAD_SAFETY_ANALYSIS {
auto* pool_impl = GetPoolImpl(pool);
auto eviction_queue_entries = GetEntrySet(&pool_impl->eviction_queue_);
absl::flat_hash_set<EntryIdentifier> expected_eviction_queue_entries;
size_t expected_total_bytes = 0;
for (auto* cache : pool_impl->caches_) {
EXPECT_EQ(pool_impl, cache->pool_);
EXPECT_NE("", cache->cache_identifier_);
EXPECT_EQ(1, expected_caches.count(Access::StaticCast<Cache>(cache)));
}
EXPECT_EQ(1 + expected_caches.size(), pool_impl->weak_references_.load());
for (auto* cache : expected_caches) {
auto* cache_impl = Access::StaticCast<CacheImpl>(cache);
if (!cache_impl->cache_identifier_.empty()) {
auto it = pool_impl->caches_.find(cache_impl);
ASSERT_NE(it, pool_impl->caches_.end());
EXPECT_EQ(cache_impl, *it);
}
if (pool_impl->limits_.total_bytes_limit != 0) {
for (auto& shard : cache_impl->shards_) {
for (CacheEntryImpl* entry : shard.entries) {
EXPECT_EQ(
entry->num_bytes_,
cache->DoGetSizeInBytes(Access::StaticCast<Cache::Entry>(entry)));
expected_total_bytes += entry->num_bytes_;
if (entry->reference_count_.load() == 0) {
expected_eviction_queue_entries.emplace(GetEntryIdentifier(entry));
}
}
}
}
}
EXPECT_EQ(expected_total_bytes, pool_impl->total_bytes_);
EXPECT_THAT(expected_eviction_queue_entries,
::testing::IsSubsetOf(eviction_queue_entries));
}
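// Wraps AssertInvariants in a SCOPED_TRACE so failures report the caller's
// line rather than a location inside the helper.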
#define TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(...) \
do { \
SCOPED_TRACE(""); \
AssertInvariants(__VA_ARGS__); \
} while (false)
template <typename CacheType = TestCache>
CachePtr<CacheType> GetTestCache(
CachePool* pool, std::string cache_identifier,
std::shared_ptr<TestCache::RequestLog> log = {}) {
return GetCache<CacheType>(pool, cache_identifier, [&] {
if (log) {
absl::MutexLock lock(&log->mutex);
log->cache_allocate_log.emplace_back(cache_identifier);
}
return std::make_unique<CacheType>(log);
});
}
TEST(CachePoolTest, GetCacheEmptyKey) {
auto pool = CachePool::Make(kSmallCacheLimits);
auto log = std::make_shared<TestCache::RequestLog>();
{
auto test_cache1 = GetTestCache(pool.get(), "", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre(""));
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
auto test_cache2 = GetTestCache(pool.get(), "", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("", ""));
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
EXPECT_NE(test_cache1, test_cache2);
}
EXPECT_THAT(log->cache_allocate_log, ElementsAre("", ""));
EXPECT_THAT(log->cache_destroy_log, ElementsAre("", ""));
}
TEST(CachePoolTest, GetCacheEmptyKeyCacheDisabled) {
auto log = std::make_shared<TestCache::RequestLog>();
{
auto test_cache1 = GetTestCache(nullptr, "", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre(""));
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
auto test_cache2 = GetTestCache(nullptr, "", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("", ""));
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
EXPECT_NE(test_cache1, test_cache2);
}
EXPECT_THAT(log->cache_allocate_log, ElementsAre("", ""));
EXPECT_THAT(log->cache_destroy_log, ElementsAre("", ""));
}
TEST(CachePoolTest, GetCacheNonEmptyKey) {
auto pool = CachePool::Make(kSmallCacheLimits);
auto log = std::make_shared<TestCache::RequestLog>();
{
auto test_cache1 = GetTestCache(pool.get(), "x", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("x"));
auto test_cache2 = GetTestCache(pool.get(), "x", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("x"));
EXPECT_EQ(test_cache1, test_cache2);
}
EXPECT_THAT(log->cache_destroy_log, ElementsAre("x"));
}
TEST(CachePoolTest, GetCacheNonEmptyKeyCacheDisabled) {
auto log = std::make_shared<TestCache::RequestLog>();
{
auto test_cache1 = GetTestCache(nullptr, "x", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("x"));
auto test_cache2 = GetTestCache(nullptr, "x", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("x", "x"));
EXPECT_NE(test_cache1, test_cache2);
}
EXPECT_THAT(log->cache_destroy_log, ElementsAre("", ""));
}
TEST(CachePoolTest, GetCacheNullptr) {
auto pool = CachePool::Make(CachePool::Limits{10000});
int make_cache_calls = 0;
auto make_cache = [&] {
++make_cache_calls;
return nullptr;
};
{
auto cache = GetCache<TestCache>(pool.get(), "x", make_cache);
EXPECT_EQ(nullptr, cache);
EXPECT_EQ(1, make_cache_calls);
}
{
auto cache = GetCache<TestCache>(pool.get(), "x", make_cache);
EXPECT_EQ(nullptr, cache);
EXPECT_EQ(2, make_cache_calls);
}
}
TEST(CachePoolTest, GetCacheNonEmptyKeyNoReferences) {
auto pool = CachePool::Make(CachePool::Limits{});
auto log = std::make_shared<TestCache::RequestLog>();
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
{
auto pool2 = pool;
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->strong_references_.load());
}
{
auto test_cache1 = GetTestCache(pool.get(), "x", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("x"));
EXPECT_EQ(1, GetPoolImpl(pool)->caches_.size());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(1, test_cache1->use_count());
}
EXPECT_THAT(log->cache_destroy_log, ElementsAre("x"));
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
EXPECT_EQ(0, GetPoolImpl(pool)->caches_.size());
}
TEST(CachePoolTest, StrongToWeakToStrong) {
CachePool::StrongPtr strong_ptr = CachePool::Make({});
CachePool::WeakPtr weak_ptr(strong_ptr);
strong_ptr = CachePool::StrongPtr();
strong_ptr = CachePool::StrongPtr(weak_ptr);
weak_ptr = CachePool::WeakPtr();
}
class NamedOrAnonymousCacheTest : public ::testing::TestWithParam<const char*> {
public:
std::shared_ptr<TestCache::RequestLog> log =
std::make_shared<TestCache::RequestLog>();
std::string cache_key = GetParam();
CachePtr<TestCache> GetCache(const CachePool::StrongPtr& pool) {
return GetTestCache(pool.get(), cache_key, log);
}
};
INSTANTIATE_TEST_SUITE_P(WithoutCacheKey, NamedOrAnonymousCacheTest,
::testing::Values(""));
INSTANTIATE_TEST_SUITE_P(WithCacheKey, NamedOrAnonymousCacheTest,
::testing::Values("k"));
TEST_P(NamedOrAnonymousCacheTest, CacheEntryKeepsCacheAlive) {
{
PinnedCacheEntry<TestCache> entry;
{
auto pool = CachePool::Make(CachePool::Limits{});
auto test_cache = GetCache(pool);
EXPECT_THAT(log->cache_allocate_log, ElementsAre(cache_key));
entry = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre(cache_key));
}
EXPECT_EQ(1, GetOwningCache(*entry).use_count());
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair(cache_key, "a")));
EXPECT_THAT(log->cache_destroy_log, ElementsAre(cache_key));
}
TEST_P(NamedOrAnonymousCacheTest, GetWithImmediateEvict) {
auto pool = CachePool::Make(CachePool::Limits{});
auto test_cache = GetCache(pool);
EXPECT_EQ(1, test_cache->use_count());
{
auto e = GetCacheEntry(test_cache, "a");
EXPECT_EQ(2, test_cache->use_count());
EXPECT_THAT(log->entry_allocate_log, ElementsAre(cache_key));
e->data = "value";
EXPECT_EQ(1, e->use_count());
{
auto e2 = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre(cache_key));
EXPECT_EQ(2, test_cache->use_count());
EXPECT_EQ(2, e2->use_count());
EXPECT_EQ(e, e2);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
EXPECT_EQ(1, e->use_count());
EXPECT_EQ(2, test_cache->use_count());
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair(cache_key, "a")));
EXPECT_EQ(1, test_cache->use_count());
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
{
auto e = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre(cache_key, cache_key));
EXPECT_EQ("", e->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
EXPECT_THAT(log->entry_destroy_log,
ElementsAre(Pair(cache_key, "a"), Pair(cache_key, "a")));
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
TEST_P(NamedOrAnonymousCacheTest, GetWithoutImmediateEvict) {
{
auto pool = CachePool::Make(kSmallCacheLimits);
auto test_cache = GetCache(pool);
{
auto e = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre(cache_key));
e->data = "value";
auto e2 = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre(cache_key));
EXPECT_EQ(e, e2);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
{
auto e = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre(cache_key));
EXPECT_EQ("value", e->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
{
auto e1 = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre(cache_key));
EXPECT_EQ("value", e1->data);
auto e2 = GetCacheEntry(test_cache, "b");
EXPECT_THAT(log->entry_allocate_log, ElementsAre(cache_key, cache_key));
e2->data = "value2";
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
{
auto e1 = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre(cache_key, cache_key));
EXPECT_EQ("value", e1->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
{
auto e2 = GetCacheEntry(test_cache, "b");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre(cache_key, cache_key));
EXPECT_EQ("value2", e2->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
}
EXPECT_THAT(log->entry_destroy_log,
UnorderedElementsAre(Pair(cache_key, "a"), Pair(cache_key, "b")));
EXPECT_THAT(log->cache_destroy_log, ElementsAre(cache_key));
}
TEST(CacheTest, NamedGetWithoutImmediateEvict) {
auto pool = CachePool::Make(kSmallCacheLimits);
auto log = std::make_shared<TestCache::RequestLog>();
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_);
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_);
{
auto test_cache = GetTestCache(pool.get(), "cache", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("cache"));
{
auto e = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache"));
e->data = "value";
auto e2 = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache"));
EXPECT_EQ(e, e2);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
{
auto e = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache"));
EXPECT_EQ("value", e->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_);
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_);
{
auto test_cache = GetTestCache(pool.get(), "cache");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache"));
{
auto e = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache"));
EXPECT_EQ("value", e->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
}
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_);
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_);
}
TEST_P(NamedOrAnonymousCacheTest, UpdateSizeThenEvict) {
auto pool = CachePool::Make(CachePool::Limits{});
auto test_cache = GetCache(pool);
{
auto entry = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre(cache_key));
entry->data = "a";
entry->ChangeSize(5000);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair(cache_key, "a")));
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_EQ("", GetCacheEntry(test_cache, "a")->data);
}
TEST_P(NamedOrAnonymousCacheTest, UpdateSizeNoEvict) {
CachePool::Limits limits;
limits.total_bytes_limit = 10000;
auto pool = CachePool::Make(limits);
auto test_cache = GetCache(pool);
{
auto entry = GetCacheEntry(test_cache, "a");
entry->data = "a";
entry->ChangeSize(1);
entry->ChangeSize(5000);
entry->ChangeSize(5000);
entry->ChangeSize(5000);
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
{
auto entry = GetCacheEntry(test_cache, "b");
entry->data = "b";
entry->ChangeSize(5000);
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_EQ("a", GetCacheEntry(test_cache, "a")->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_EQ("b", GetCacheEntry(test_cache, "b")->data);
GetCacheEntry(test_cache, "c")->data = "c";
EXPECT_THAT(log->entry_destroy_log,
UnorderedElementsAre(Pair(cache_key, "a")));
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_EQ("", GetCacheEntry(test_cache, "a")->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_EQ("b", GetCacheEntry(test_cache, "b")->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_EQ("c", GetCacheEntry(test_cache, "c")->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_THAT(log->entry_destroy_log,
UnorderedElementsAre(Pair(cache_key, "a")));
}
TEST(CacheTest, CacheDependsOnOtherCache) {
class CacheA : public tensorstore::internal::Cache {
using Base = tensorstore::internal::Cache;
public:
class Entry : public Cache::Entry {};
using Base::Base;
Entry* DoAllocateEntry() final { return new Entry; }
size_t DoGetSizeofEntry() final { return sizeof(Entry); }
};
class CacheB : public tensorstore::internal::Cache {
using Base = tensorstore::internal::Cache;
public:
class Entry : public Cache::Entry {};
using Base::Base;
Entry* DoAllocateEntry() final { return new Entry; }
size_t DoGetSizeofEntry() final { return sizeof(Entry); }
CachePtr<CacheA> cache_a;
};
auto pool = CachePool::Make(kSmallCacheLimits);
auto cache_a = GetCache<CacheA>(pool.get(), "x",
[&] { return std::make_unique<CacheA>(); });
auto cache_b = GetCache<CacheB>(pool.get(), "x",
[&] { return std::make_unique<CacheB>(); });
GetCacheEntry(cache_b, "key");
cache_b->cache_a = cache_a;
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool,
{cache_a.get(), cache_b.get()});
}
constexpr static int kDefaultIterations = 100;
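// Each TestConcurrent call below runs, per iteration: the first lambda as
// setup, the trailing lambdas concurrently, then the second lambda as
// verification.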
TEST(CacheTest, ConcurrentGetCacheEntry) {
auto pool = CachePool::Make(kSmallCacheLimits);
auto cache = GetTestCache(pool.get(), "cache");
PinnedCacheEntry<TestCache> pinned_entries[3];
TestConcurrent(
kDefaultIterations,
[] {},
[&] {
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {cache.get()});
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
EXPECT_EQ(2, cache->use_count());
for (auto& e : pinned_entries) {
e.reset();
}
EXPECT_EQ(1, cache->use_count());
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {cache.get()});
},
[&] { pinned_entries[0] = GetCacheEntry(cache, "a"); },
[&] { pinned_entries[1] = GetCacheEntry(cache, "a"); },
[&] { pinned_entries[2] = GetCacheEntry(cache, "a"); });
}
TEST(CacheTest, ConcurrentGetCacheEntryWeakReferenceCacheDisabled) {
auto cache = GetTestCache(nullptr, "cache");
PinnedCacheEntry<TestCache> entry;
TestConcurrent(
kDefaultIterations,
[&] { entry = GetCacheEntry(cache, "a"); },
[&] {},
[&] { entry->AcquireWeakReference(); },
[&] { entry->AcquireWeakReference(); });
}
TEST(CacheTest,
ConcurrentDestroyStrongAndWeakCacheEntryReferenceCacheDisabled) {
auto cache = GetTestCache(nullptr, "cache");
PinnedCacheEntry<TestCache> entry;
WeakPinnedCacheEntry weak_ref;
TestConcurrent(
kDefaultIterations,
[&] {
entry = GetCacheEntry(cache, "a");
weak_ref = entry->AcquireWeakReference();
},
[&] {},
[&] { entry = {}; }, [&] { weak_ref = {}; });
}
TEST(CacheTest, ConcurrentGetCache) {
auto pool = CachePool::Make(kSmallCacheLimits);
CachePtr<TestCache> caches[3];
TestConcurrent(
kDefaultIterations,
[] {},
[&] {
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(
pool, {caches[0].get(), caches[1].get(), caches[2].get()});
size_t use_count = 3;
for (auto& cache : caches) {
EXPECT_EQ(use_count, cache->use_count());
cache.reset();
--use_count;
}
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
},
[&] { caches[0] = GetTestCache(pool.get(), "cache"); },
[&] { caches[1] = GetTestCache(pool.get(), "cache"); },
[&] { caches[2] = GetTestCache(pool.get(), "cache"); });
}
TEST(CacheTest, ConcurrentReleaseCache) {
auto pool = CachePool::Make(kSmallCacheLimits);
CachePtr<TestCache> caches[3];
TestConcurrent(
kDefaultIterations,
[&] {
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
for (auto& cache : caches) {
cache = GetTestCache(pool.get(), "cache");
}
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
},
[&] {
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
},
[&] { caches[0].reset(); }, [&] { caches[1].reset(); },
[&] { caches[2].reset(); });
}
TEST(CacheTest, ConcurrentGetReleaseCache) {
auto pool = CachePool::Make(kSmallCacheLimits);
const auto concurrent_op = [&] {
auto cache = GetTestCache(pool.get(), "cache");
};
TestConcurrent(
kDefaultIterations,
[&] {
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
},
[&] {},
concurrent_op, concurrent_op, concurrent_op);
}
TEST(CacheTest, ConcurrentReleaseCacheEntry) {
auto pool = CachePool::Make(kSmallCacheLimits);
auto cache = GetTestCache(pool.get(), "cache");
PinnedCacheEntry<TestCache> pinned_entries[3];
TestConcurrent(
kDefaultIterations,
[&] {
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
for (auto& e : pinned_entries) {
e = GetCacheEntry(cache, "a");
}
EXPECT_EQ(2, cache->use_count());
},
[&] {
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {cache.get()});
EXPECT_EQ(1, cache->use_count());
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
},
[&] { pinned_entries[0].reset(); }, [&] { pinned_entries[1].reset(); },
[&] { pinned_entries[2].reset(); });
}
TEST(CacheTest, ConcurrentGetReleaseCacheEntry) {
auto pool = CachePool::Make(kSmallCacheLimits);
auto cache = GetTestCache(pool.get(), "cache");
const auto concurrent_op = [&] {
auto entry = GetCacheEntry(cache, "a");
};
TestConcurrent(
kDefaultIterations,
[&] {
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
EXPECT_EQ(1, cache->use_count());
},
[&] {
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {cache.get()});
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
EXPECT_EQ(1, cache->use_count());
},
concurrent_op, concurrent_op, concurrent_op);
}
TEST(CacheTest, ConcurrentDestroyCacheEvictEntries) {
CachePool::Limits limits = {};
limits.total_bytes_limit = 1;
auto pool = CachePool::Make(limits);
const auto concurrent_op = [&] {
auto cache = GetTestCache(pool.get(), "");
auto entry = GetCacheEntry(cache, "a");
};
TestConcurrent(
kDefaultIterations,
[&] {},
[&] {},
concurrent_op, concurrent_op, concurrent_op);
}
TEST(CacheTest, EvictEntryDestroyCache) {
auto log = std::make_shared<TestCache::RequestLog>();
CachePool::Limits limits;
limits.total_bytes_limit = 1;
auto pool = CachePool::Make(limits);
auto cache_b = GetTestCache(pool.get(), "cache_b", log);
{
auto cache_a = GetTestCache(pool.get(), "cache_a", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("cache_b", "cache_a"));
auto entry_a = GetCacheEntry(cache_a, "entry_a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache_a"));
entry_a->data = "entry_a";
}
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
{
auto cache_a = GetTestCache(pool.get(), "cache_a");
auto entry_a = GetCacheEntry(cache_a, "entry_a");
EXPECT_THAT(log->cache_allocate_log,
ElementsAre("cache_b", "cache_a"));
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache_a"));
ASSERT_EQ("entry_a", entry_a->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(
pool, {cache_a.get(), cache_b.get()});
}
auto entry_b = GetCacheEntry(cache_b, "entry_b");
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair("cache_a", "entry_a")));
EXPECT_THAT(log->cache_destroy_log, ElementsAre("cache_a"));
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {cache_b.get()});
{
auto cache_a = GetTestCache(pool.get(), "cache_a");
auto entry_a = GetCacheEntry(cache_a, "entry_a");
EXPECT_EQ("", entry_a->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(
pool, {cache_a.get(), cache_b.get()});
}
}
TEST(CacheTest, CachePoolWeakPtr) {
auto log = std::make_shared<TestCache::RequestLog>();
auto pool = CachePool::Make(kSmallCacheLimits);
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
auto cache_a = GetTestCache(pool.get(), "cache_a", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("cache_a"));
auto entry_a = GetCacheEntry(cache_a, "entry_a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache_a"));
entry_a->data = "entry_a";
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
auto cache_b = GetTestCache(pool.get(), "cache_b", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("cache_a", "cache_b"));
auto entry_b = GetCacheEntry(cache_b, "entry_b");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache_a", "cache_b"));
entry_b->data = "entry_b";
EXPECT_EQ(3, GetPoolImpl(pool)->weak_references_.load());
cache_a.reset();
entry_a.reset();
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
CachePool::WeakPtr weak_pool(pool);
EXPECT_EQ(1, GetPoolImpl(weak_pool)->strong_references_.load());
EXPECT_EQ(3, GetPoolImpl(weak_pool)->weak_references_.load());
{
CachePool::StrongPtr strong_pool(pool);
EXPECT_EQ(2, GetPoolImpl(weak_pool)->strong_references_.load());
EXPECT_EQ(3, GetPoolImpl(weak_pool)->weak_references_.load());
}
EXPECT_EQ(1, GetPoolImpl(weak_pool)->strong_references_.load());
EXPECT_EQ(3, GetPoolImpl(weak_pool)->weak_references_.load());
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
pool = {};
EXPECT_EQ(0, GetPoolImpl(weak_pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(weak_pool)->weak_references_.load());
EXPECT_THAT(log->cache_destroy_log, ElementsAre("cache_a"));
{
auto cache_c = GetTestCache(weak_pool.get(), "cache_c", log);
EXPECT_THAT(log->cache_allocate_log,
ElementsAre("cache_a", "cache_b", "cache_c"));
auto entry_c = GetCacheEntry(cache_c, "entry_c");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre("cache_a", "cache_b", "cache_c"));
entry_c->data = "entry_c";
}
EXPECT_THAT(log->cache_destroy_log, ElementsAre("cache_a", "cache_c"));
CachePool::StrongPtr strong_pool(weak_pool);
EXPECT_EQ(1, GetPoolImpl(strong_pool)->strong_references_.load());
EXPECT_EQ(3, GetPoolImpl(strong_pool)->weak_references_.load());
{
auto cache_d = GetTestCache(strong_pool.get(), "cache_d", log);
EXPECT_THAT(log->cache_allocate_log,
ElementsAre("cache_a", "cache_b", "cache_c", "cache_d"));
auto entry_d = GetCacheEntry(cache_d, "entry_d");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre("cache_a", "cache_b", "cache_c", "cache_d"));
entry_d->data = "entry_d";
}
EXPECT_THAT(log->cache_destroy_log, ElementsAre("cache_a", "cache_c"));
}
TEST(CacheTest, TestCacheWithCachePool) {
auto log = std::make_shared<TestCache::RequestLog>();
auto pool = CachePool::Make(kSmallCacheLimits);
{
auto cache_a =
GetTestCache<TestCacheWithCachePool>(pool.get(), "cache_a", log);
cache_a->cache_pool = CachePool::WeakPtr(pool);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("cache_a"));
auto entry_a = GetCacheEntry(cache_a, "entry_a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache_a"));
entry_a->data = "entry_a";
}
}
TEST(CacheTest, EntryWeakReference) {
auto log = std::make_shared<TestCache::RequestLog>();
auto pool = CachePool::Make(CachePool::Limits{});
auto cache = GetTestCache(pool.get(), "cache", log);
auto entry_a = GetCacheEntry(cache, "a");
auto weak_ref = entry_a->AcquireWeakReference();
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
pool = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
entry_a = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
weak_ref = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair("cache", "a")));
}
TEST(CacheTest, EntryWeakReferenceCacheDisabled) {
auto log = std::make_shared<TestCache::RequestLog>();
auto cache = GetTestCache(nullptr, "cache", log);
auto entry_a = GetCacheEntry(cache, "a");
auto weak_ref = entry_a->AcquireWeakReference();
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
entry_a = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair("", "a")));
weak_ref = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair("", "a")));
}
TEST(CacheTest, EntryWeakReferencesCacheDisabled) {
auto log = std::make_shared<TestCache::RequestLog>();
auto cache = GetTestCache(nullptr, "cache", log);
auto entry_a = GetCacheEntry(cache, "a");
auto weak_ref = entry_a->AcquireWeakReference();
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
weak_ref = {};
weak_ref = entry_a->AcquireWeakReference();
entry_a = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair("", "a")));
weak_ref = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair("", "a")));
}
TEST(CacheTest, EntryWeakReferences) {
auto log = std::make_shared<TestCache::RequestLog>();
auto pool = CachePool::Make(CachePool::Limits{});
auto cache = GetTestCache(pool.get(), "cache", log);
auto entry_a = GetCacheEntry(cache, "a");
auto weak_ref = entry_a->AcquireWeakReference();
auto weak_ref2 = entry_a->AcquireWeakReference();
auto entry_a2 = GetCacheEntry(cache, "a");
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
pool = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
entry_a = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
weak_ref = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
weak_ref2 = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
entry_a2 = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair("cache", "a")));
}
TEST(CacheTest, GetStrongEntryReferenceWhileHoldingOnlyWeakReference) {
auto log = std::make_shared<TestCache::RequestLog>();
auto pool = CachePool::Make(CachePool::Limits{});
auto cache = GetTestCache(pool.get(), "cache", log);
auto entry_a = GetCacheEntry(cache, "a");
auto weak_ref = entry_a->AcquireWeakReference();
entry_a = {};
entry_a = GetCacheEntry(cache, "a");
entry_a = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
weak_ref = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair("cache", "a")));
}
TEST(CacheTest,
GetStrongEntryReferenceWhileHoldingOnlyWeakReferenceCacheDisabled) {
auto log = std::make_shared<TestCache::RequestLog>();
auto cache = GetTestCache(nullptr, "cache", log);
auto entry_a = GetCacheEntry(cache, "a");
auto weak_ref = entry_a->AcquireWeakReference();
entry_a = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair("", "a")));
entry_a = GetCacheEntry(cache, "a");
entry_a = {};
EXPECT_THAT(log->entry_destroy_log,
ElementsAre(Pair("", "a"), Pair("", "a")));
weak_ref = {};
EXPECT_THAT(log->entry_destroy_log,
ElementsAre(Pair("", "a"), Pair("", "a")));
}
TEST(CacheTest, PoolWithNonZeroBytesLimit) {
auto log = std::make_shared<TestCache::RequestLog>();
auto pool = CachePool::Make(kSmallCacheLimits);
auto cache = GetTestCache(pool.get(), "cache", log);
{
auto entry_a = GetCacheEntry(cache, "a");
auto weak_ref = entry_a->AcquireWeakReference();
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
pool = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
cache = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair("cache", "a")));
}
TEST(CacheTest, WeakRefOwnedByEntry) {
auto log = std::make_shared<TestCache::RequestLog>();
auto pool = CachePool::Make(kSmallCacheLimits);
auto cache1 = GetTestCache(pool.get(), "cache1", log);
auto cache2 = GetTestCache(pool.get(), "cache2", log);
{
auto entry_a = GetCacheEntry(cache1, "a");
auto entry_b = GetCacheEntry(cache1, "b");
entry_a->weak_ref = entry_b->AcquireWeakReference();
}
{ auto entry_c = GetCacheEntry(cache2, "c"); }
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
pool = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
cache1 = {};
EXPECT_THAT(log->entry_destroy_log,
UnorderedElementsAre(Pair("cache1", "a"), Pair("cache1", "b")));
cache2 = {};
EXPECT_THAT(log->entry_destroy_log,
UnorderedElementsAre(Pair("cache1", "a"), Pair("cache1", "b"),
Pair("cache2", "c")));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/cache/cache.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/cache/cache_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
99662d93-902f-4bd4-a780-65a01fee4b6b | cpp | tensorflow/tensorflow | allocator | third_party/xla/xla/tsl/framework/allocator.cc | tensorflow/core/framework/allocator_test.cc | #include "xla/tsl/framework/allocator.h"
#include <atomic>
#include "xla/tsl/framework/allocator_registry.h"
#include "xla/tsl/framework/tracking_allocator.h"
#include "tsl/platform/mem.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/strcat.h"
#include "tsl/platform/stringprintf.h"
#include "tsl/platform/types.h"
namespace tsl {
string AllocatorStats::DebugString() const {
return strings::Printf(
"Limit: %20lld\n"
"InUse: %20lld\n"
"MaxInUse: %20lld\n"
"NumAllocs: %20lld\n"
"MaxAllocSize: %20lld\n"
"Reserved: %20lld\n"
"PeakReserved: %20lld\n"
"LargestFreeBlock: %20lld\n",
static_cast<long long>(this->bytes_limit ? *this->bytes_limit : 0),
static_cast<long long>(this->bytes_in_use),
static_cast<long long>(this->peak_bytes_in_use),
static_cast<long long>(this->num_allocs),
static_cast<long long>(this->largest_alloc_size),
static_cast<long long>(this->bytes_reserved),
static_cast<long long>(this->peak_bytes_reserved),
static_cast<long long>(this->largest_free_block_bytes));
}
constexpr size_t Allocator::kAllocatorAlignment;
Allocator::~Allocator() = default;
static bool cpu_allocator_collect_full_stats = false;
void EnableCPUAllocatorFullStats() { cpu_allocator_collect_full_stats = true; }
bool CPUAllocatorFullStatsEnabled() { return cpu_allocator_collect_full_stats; }
string AllocatorAttributes::DebugString() const {
return strings::StrCat("AllocatorAttributes(on_host=", on_host(),
" nic_compatible=", nic_compatible(),
" gpu_compatible=", gpu_compatible(), ")");
}
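// Returns the process-wide base CPU allocator. On the first call that sees
// full-stats collection enabled, the allocator is wrapped once in a
// TrackingAllocator; the wrapper is retained for the process lifetime.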
Allocator* cpu_allocator_base() {
static Allocator* cpu_alloc =
AllocatorFactoryRegistry::singleton()->GetAllocator();
if (cpu_allocator_collect_full_stats && !cpu_alloc->TracksAllocationSizes()) {
cpu_alloc = new TrackingAllocator(cpu_alloc, true);
}
return cpu_alloc;
}
Allocator* cpu_allocator(int numa_node) {
static ProcessStateInterface* ps =
AllocatorFactoryRegistry::singleton()->process_state();
if (ps) {
return ps->GetCPUAllocator(numa_node);
} else {
return cpu_allocator_base();
}
}
SubAllocator::SubAllocator(const std::vector<Visitor>& alloc_visitors,
const std::vector<Visitor>& free_visitors)
: alloc_visitors_(alloc_visitors), free_visitors_(free_visitors) {}
void SubAllocator::VisitAlloc(void* ptr, int index, size_t num_bytes) {
for (const auto& v : alloc_visitors_) {
v(ptr, index, num_bytes);
}
}
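// Free visitors run in reverse registration order, so teardown mirrors the
// order in which allocation visitors ran.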
void SubAllocator::VisitFree(void* ptr, int index, size_t num_bytes) {
for (int i = free_visitors_.size() - 1; i >= 0; --i) {
free_visitors_[i](ptr, index, num_bytes);
}
}
} | #include "tensorflow/core/framework/allocator.h"
#include <algorithm>
#include <vector>
#include "xla/tsl/profiler/utils/xplane_utils.h"
#include "tensorflow/core/framework/typed_allocator.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/profiler/lib/profiler_session.h"
#include "tensorflow/core/profiler/protobuf/memory_profile.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
namespace tensorflow {
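// Compares allocator stats against expected values. Exact byte counts are
// only asserted (within a small slop) on optimized internal builds, where
// the underlying malloc sizes are predictable; elsewhere the stats are just
// logged.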
static void CheckStats(Allocator* a, int64_t num_allocs, int64_t bytes_in_use,
int64_t peak_bytes_in_use, int64_t largest_alloc_size) {
absl::optional<AllocatorStats> stats = a->GetStats();
EXPECT_TRUE(stats);
if (!stats) {
return;
}
LOG(INFO) << "Alloc stats: \n" << stats->DebugString();
#if defined(PLATFORM_GOOGLE) && defined(NDEBUG)
  static const int64_t kSlop = 5 * 1024;
EXPECT_GT(stats->bytes_in_use, bytes_in_use - kSlop);
EXPECT_LT(stats->bytes_in_use, bytes_in_use + kSlop);
EXPECT_GT(stats->peak_bytes_in_use, peak_bytes_in_use - kSlop);
EXPECT_LT(stats->peak_bytes_in_use, peak_bytes_in_use + kSlop);
EXPECT_EQ(stats->num_allocs, num_allocs);
EXPECT_EQ(stats->largest_alloc_size, largest_alloc_size);
#endif
}
TEST(AllocatorAttributesTest, AllCombos) {
for (bool on_host : {false, true}) {
for (bool nic_compatible : {false, true}) {
for (bool gpu_compatible : {false, true}) {
AllocatorAttributes aa;
aa.set_on_host(on_host);
aa.set_nic_compatible(nic_compatible);
aa.set_gpu_compatible(gpu_compatible);
EXPECT_EQ(on_host, aa.on_host());
EXPECT_EQ(nic_compatible, aa.nic_compatible());
EXPECT_EQ(gpu_compatible, aa.gpu_compatible());
}
}
}
}
TEST(AllocatorAttributesTest, IsEqualOrLessRestrictiveThan) {
AllocatorAttributes a, b;
EXPECT_TRUE(a.IsEqualOrLessRestrictiveThan(b));
EXPECT_TRUE(a.IsEqualOrLessRestrictiveThan(a));
EXPECT_TRUE(b.IsEqualOrLessRestrictiveThan(b));
b.set_gpu_compatible(true);
EXPECT_TRUE(a.IsEqualOrLessRestrictiveThan(b));
EXPECT_FALSE(b.IsEqualOrLessRestrictiveThan(a));
EXPECT_TRUE(a.IsEqualOrLessRestrictiveThan(a));
EXPECT_TRUE(b.IsEqualOrLessRestrictiveThan(b));
a.set_nic_compatible(true);
EXPECT_FALSE(a.IsEqualOrLessRestrictiveThan(b));
EXPECT_FALSE(b.IsEqualOrLessRestrictiveThan(a));
a.set_gpu_compatible(true);
EXPECT_TRUE(b.IsEqualOrLessRestrictiveThan(a));
EXPECT_FALSE(a.IsEqualOrLessRestrictiveThan(b));
}
TEST(AllocatorAttributesTest, Merge) {
AllocatorAttributes a, b;
EXPECT_EQ(a.value, 0);
EXPECT_EQ(b.value, 0);
EXPECT_FALSE(a.nic_compatible());
EXPECT_FALSE(b.nic_compatible());
b.set_nic_compatible(true);
a.Merge(b);
EXPECT_TRUE(a.nic_compatible());
EXPECT_TRUE(b.nic_compatible());
EXPECT_EQ(a.scope_id, 0);
EXPECT_EQ(b.scope_id, 0);
a.scope_id = 1;
a.Merge(b);
EXPECT_EQ(a.scope_id, 1);
EXPECT_EQ(b.scope_id, 0);
a.scope_id = 1;
b.scope_id = 0;
b.Merge(a);
EXPECT_EQ(a.scope_id, 1);
EXPECT_EQ(b.scope_id, 1);
a.scope_id = 2;
b.scope_id = 2;
a.Merge(b);
EXPECT_EQ(a.scope_id, 2);
EXPECT_EQ(b.scope_id, 2);
}
TEST(AllocatorAttributesDeathTest, MergeDifferentScopeIds) {
AllocatorAttributes a, b;
a.scope_id = 3;
b.scope_id = 4;
EXPECT_DEATH({ a.Merge(b); }, "");
}
TEST(CPUAllocatorTest, Simple) {
EnableCPUAllocatorStats();
Allocator* a = cpu_allocator();
std::vector<void*> ptrs;
for (int s = 1; s < 1024; s++) {
void* raw = a->AllocateRaw(1, s);
ptrs.push_back(raw);
}
std::sort(ptrs.begin(), ptrs.end());
CheckStats(a, 1023, 552640, 552640, 1024);
for (size_t i = 0; i < ptrs.size(); i++) {
if (i > 0) {
CHECK_NE(ptrs[i], ptrs[i - 1]);
}
a->DeallocateRaw(ptrs[i]);
}
CheckStats(a, 1023, 0, 552640, 1024);
float* t1 = TypedAllocator::Allocate<float>(a, 1024, {});
double* t2 = TypedAllocator::Allocate<double>(a, 1048576, {});
CheckStats(a, 1025, 1048576 * sizeof(double) + 1024 * sizeof(float),
1048576 * sizeof(double) + 1024 * sizeof(float),
1048576 * sizeof(double));
TypedAllocator::Deallocate(a, t1, 1024);
TypedAllocator::Deallocate(a, t2, 1048576);
CheckStats(a, 1025, 0, 1048576 * sizeof(double) + 1024 * sizeof(float),
1048576 * sizeof(double));
CHECK(a->ClearStats());
CheckStats(a, 0, 0, 0, 0);
DisableCPUAllocatorStats();
}
struct TestStruct {
int x;
};
TEST(CPUAllocatorTest, CheckStructSize) { CHECK_GT(sizeof(TestStruct), 1); }
TEST(CPUAllocatorTest, AllocateOverflowMaxSizeT) {
Allocator* a = cpu_allocator();
size_t count_to_allocate = std::numeric_limits<size_t>::max();
TestStruct* const test_pointer =
TypedAllocator::Allocate<TestStruct>(a, count_to_allocate, {});
  CHECK_EQ(test_pointer, nullptr);
}
TEST(CPUAllocatorTest, AllocateOverflowSmallest) {
Allocator* a = cpu_allocator();
const size_t count_to_allocate =
(std::numeric_limits<size_t>::max() / sizeof(TestStruct)) + 1;
TestStruct* const test_pointer =
TypedAllocator::Allocate<TestStruct>(a, count_to_allocate, {});
  CHECK_EQ(test_pointer, nullptr);
}
TEST(CPUAllocatorTest, Sizes) {
Allocator* a = cpu_allocator();
  EXPECT_FALSE(a->TracksAllocationSizes());
}
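// Verifies that an allocation and a deallocation made while a profiler
// session is active are recorded as MemoryAllocation / MemoryDeallocation
// events with the expected byte counts.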
TEST(CPUAllocatorTest, ProfilerReporting) {
void* p = port::AlignedMalloc(8, 1);
const std::size_t alloc_size = port::MallocExtension_GetAllocatedSize(p);
port::AlignedFree(p);
if (alloc_size == 0) {
LOG(WARNING) << "Skipping Memory Debugging test. It requires "
<< "port::MallocExtension_GetAllocatedSize to work.";
return;
}
EnableCPUAllocatorStats();
Allocator* a = cpu_allocator();
void* p1 = a->AllocateRaw(1, 16);
std::unique_ptr<ProfilerSession> profiler =
tensorflow::ProfilerSession::Create(
tensorflow::ProfilerSession::DefaultOptions());
void* p2 = a->AllocateRaw(1, 32);
a->DeallocateRaw(p1);
tensorflow::profiler::XSpace xspace;
EXPECT_EQ(absl::OkStatus(), profiler->CollectData(&xspace));
const auto plane = ::tsl::profiler::FindPlaneWithName(
xspace, ::tensorflow::profiler::kHostThreadsPlaneName);
::tensorflow::profiler::XPlaneVisitor xplane(plane);
ASSERT_EQ(plane->name(), ::tensorflow::profiler::kHostThreadsPlaneName)
<< "XSpace: " << xspace.DebugString();
ASSERT_EQ(plane->event_metadata_size(), 2)
<< "XSpace: " << xspace.DebugString();
const auto& line = plane->lines(0);
ASSERT_EQ(line.events_size(), 2) << "XSpace: " << xspace.DebugString();
const auto& events = line.events();
::tensorflow::profiler::XEventVisitor e0(&xplane, &line, &events[0]);
EXPECT_EQ(e0.Name(), "MemoryAllocation")
<< "XSpace: " << xspace.DebugString();
{
absl::optional<std::string> bytes_allocated, peak_bytes_in_use,
requested_bytes, allocation_bytes;
e0.ForEachStat([&](const ::tensorflow::profiler::XStatVisitor& stat) {
LOG(ERROR) << "STAT " << stat.Name() << ": " << stat.ToString();
if (stat.Name() == "bytes_allocated") {
bytes_allocated = stat.ToString();
} else if (stat.Name() == "peak_bytes_in_use") {
peak_bytes_in_use = stat.ToString();
} else if (stat.Name() == "requested_bytes") {
requested_bytes = stat.ToString();
} else if (stat.Name() == "allocation_bytes") {
allocation_bytes = stat.ToString();
}
});
ASSERT_TRUE(bytes_allocated && peak_bytes_in_use && requested_bytes &&
allocation_bytes)
<< "XSpace: " << xspace.DebugString();
EXPECT_EQ(*bytes_allocated, "48") << "XSpace: " << xspace.DebugString();
EXPECT_EQ(*peak_bytes_in_use, "48") << "XSpace: " << xspace.DebugString();
EXPECT_EQ(*requested_bytes, "32") << "XSpace: " << xspace.DebugString();
EXPECT_EQ(*allocation_bytes, "32") << "XSpace: " << xspace.DebugString();
}
::tensorflow::profiler::XEventVisitor e1(&xplane, &line, &events[1]);
EXPECT_EQ(e1.Name(), "MemoryDeallocation")
<< "XSpace: " << xspace.DebugString();
{
absl::optional<std::string> bytes_allocated, peak_bytes_in_use,
allocation_bytes;
e1.ForEachStat([&](const ::tensorflow::profiler::XStatVisitor& stat) {
if (stat.Name() == "bytes_allocated") {
bytes_allocated = stat.ToString();
} else if (stat.Name() == "peak_bytes_in_use") {
peak_bytes_in_use = stat.ToString();
} else if (stat.Name() == "allocation_bytes") {
allocation_bytes = stat.ToString();
}
});
ASSERT_TRUE(bytes_allocated && peak_bytes_in_use && allocation_bytes)
<< "XSpace: " << xspace.DebugString();
EXPECT_EQ(*bytes_allocated, "32") << "XSpace: " << xspace.DebugString();
EXPECT_EQ(*peak_bytes_in_use, "48") << "XSpace: " << xspace.DebugString();
EXPECT_EQ(*allocation_bytes, "16") << "XSpace: " << xspace.DebugString();
}
a->DeallocateRaw(p2);
DisableCPUAllocatorStats();
}
namespace {
AllocatorAttributes DeviceAllocatorAttribute() {
AllocatorAttributes attr;
attr.value |= (0x1 << 24);
return attr;
}
bool HasDeviceAllocatorAttribute(const AllocatorAttributes& attr) {
return attr.value & (0x1 << 24);
}
}  // namespace
TEST(CustomAllocatorAttributes, TestSetterAndGetter) {
AllocatorAttributes attr = DeviceAllocatorAttribute();
EXPECT_TRUE(HasDeviceAllocatorAttribute(attr));
EXPECT_FALSE(HasDeviceAllocatorAttribute(AllocatorAttributes()));
}
static void BM_Allocation(::testing::benchmark::State& state) {
const int arg = state.range(0);
Allocator* a = cpu_allocator();
std::vector<int> sizes = {256, 4096, 16384, 524288, 512, 1048576};
int size_index = 0;
if (arg) EnableCPUAllocatorStats();
for (auto s : state) {
int bytes = sizes[size_index++ % sizes.size()];
void* p = a->AllocateRaw(1, bytes);
a->DeallocateRaw(p);
}
if (arg) DisableCPUAllocatorStats();
}
BENCHMARK(BM_Allocation)->Arg(0)->Arg(1);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/framework/allocator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/allocator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ac9824f0-e411-4d08-b0c7-6186c95f4a4c | cpp | tensorflow/tensorflow | obfuscate_names | tensorflow/tools/graph_transforms/obfuscate_names.cc | tensorflow/tools/graph_transforms/obfuscate_names_test.cc | #include "tensorflow/core/common_runtime/constant_folding.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/fold_constants_lib.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status ObfuscateNames(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
std::unordered_set<string> required_nodes;
for (const string& input : context.input_names) {
required_nodes.insert(input);
}
for (const string& output : context.output_names) {
required_nodes.insert(output);
}
const string valid_chars =
"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
const int64_t chars_size = valid_chars.size();
std::map<string, string> new_names;
int64_t name_index = 0;
for (const NodeDef& input_node : input_graph_def.node()) {
const string& old_name = input_node.name();
string new_name;
if (required_nodes.count(old_name)) {
new_name = old_name;
} else {
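      // Encode name_index in base 62 (shortest names first), retrying until
      // the generated name does not collide with a protected input/output
      // name.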
do {
int64_t remaining = name_index;
new_name = "";
while (true) {
const int64_t remainder = (remaining % chars_size);
const char current_char = valid_chars[remainder];
new_name = current_char + new_name;
remaining /= chars_size;
if (remaining <= 0) {
break;
}
}
++name_index;
} while (required_nodes.count(new_name));
}
new_names[old_name] = new_name;
}
output_graph_def->Clear();
for (const NodeDef& input_node : input_graph_def.node()) {
NodeDef* node = output_graph_def->mutable_node()->Add();
*node = input_node;
const string& old_name = input_node.name();
node->set_name(new_names[old_name]);
node->mutable_input()->Clear();
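    // Rewrite each input reference, preserving any control-dependency prefix
    // ("^") and output-port suffix (":N") around the renamed node.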
for (const string& input_name : input_node.input()) {
string prefix;
string input_node_name;
string suffix;
NodeNamePartsFromInput(input_name, &prefix, &input_node_name, &suffix);
if (new_names.count(input_node_name) == 0) {
return errors::InvalidArgument("No node named ", input_node_name,
" for input to ", old_name);
}
string new_input_name = prefix + new_names[input_node_name] + suffix;
*(node->mutable_input()->Add()) = new_input_name;
}
}
return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("obfuscate_names", ObfuscateNames);
}  // namespace graph_transforms
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status ObfuscateNames(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
class ObfuscateNamesTest : public ::testing::Test {
protected:
void TestSimpleTree() {
GraphDef graph_def;
NodeDef* add_node1 = graph_def.add_node();
add_node1->set_name("add_node1");
add_node1->set_op("Add");
add_node1->add_input("add_node2");
add_node1->add_input("add_node3");
NodeDef* add_node2 = graph_def.add_node();
add_node2->set_name("add_node2");
add_node2->set_op("Add");
add_node2->add_input("const_node1");
add_node2->add_input("const_node2");
NodeDef* add_node3 = graph_def.add_node();
add_node3->set_name("add_node3");
add_node3->set_op("Add");
add_node3->add_input("const_node3");
add_node3->add_input("const_node4");
NodeDef* const_node1 = graph_def.add_node();
const_node1->set_name("const_node1");
const_node1->set_op("Const");
NodeDef* const_node2 = graph_def.add_node();
const_node2->set_name("const_node2");
const_node2->set_op("Const");
NodeDef* const_node3 = graph_def.add_node();
const_node3->set_name("const_node3");
const_node3->set_op("Const");
NodeDef* const_node4 = graph_def.add_node();
const_node4->set_name("const_node4");
const_node4->set_op("Const");
GraphDef result;
TF_ASSERT_OK(
ObfuscateNames(graph_def, {{"const_node1"}, {"add_node1"}}, &result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(result, &node_lookup);
EXPECT_EQ(1, node_lookup.count("add_node1"));
EXPECT_EQ(0, node_lookup.count("add_node2"));
EXPECT_EQ(0, node_lookup.count("add_node3"));
EXPECT_EQ(1, node_lookup.count("const_node1"));
EXPECT_EQ(0, node_lookup.count("const_node2"));
EXPECT_EQ(0, node_lookup.count("const_node3"));
EXPECT_EQ(0, node_lookup.count("const_node4"));
}
void TestManyNodes() {
GraphDef graph_def;
for (int i = 0; i < 1000; ++i) {
NodeDef* const_node = graph_def.add_node();
const_node->set_name(strings::StrCat("const_node", i));
const_node->set_op("Const");
}
GraphDef result;
TF_ASSERT_OK(ObfuscateNames(graph_def, {{"const_node0"}, {"const_node999"}},
&result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(result, &node_lookup);
EXPECT_EQ(1, node_lookup.count("const_node0"));
EXPECT_EQ(0, node_lookup.count("const_node500"));
EXPECT_EQ(1, node_lookup.count("const_node999"));
}
void TestNameClashes() {
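    // The input names ("10", "11", ...) overlap the obfuscator's own base-62
    // output, exercising both clash avoidance against the kept names and the
    // uniqueness check below.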
GraphDef graph_def;
for (int i = 0; i < 1000; ++i) {
NodeDef* const_node = graph_def.add_node();
const_node->set_name(strings::StrCat("1", i));
const_node->set_op("Const");
}
GraphDef result;
TF_ASSERT_OK(ObfuscateNames(graph_def, {{"10"}, {"19"}}, &result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(result, &node_lookup);
EXPECT_EQ(1, node_lookup.count("10"));
EXPECT_EQ(1, node_lookup.count("19"));
std::unordered_set<string> names;
for (const NodeDef& node : result.node()) {
EXPECT_EQ(0, names.count(node.name()))
<< "Found multiple nodes with name '" << node.name() << "'";
names.insert(node.name());
}
}
};
TEST_F(ObfuscateNamesTest, TestSimpleTree) { TestSimpleTree(); }
TEST_F(ObfuscateNamesTest, TestManyNodes) { TestManyNodes(); }
TEST_F(ObfuscateNamesTest, TestNameClashes) { TestNameClashes(); }
}  // namespace graph_transforms
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/obfuscate_names.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/obfuscate_names_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
95ca6da9-8737-4ac5-baea-d3ecf04420f1 | cpp | tensorflow/tensorflow | merge_padding_with | tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with.cc | tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with_test.cc | #include "tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with.h"
#include <memory>
#include <string>
#include <variant>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/types/any.h"
#include "absl/types/variant.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/transformations/matching.h"
namespace tflite {
namespace gpu {
namespace {
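// Folds a zero-valued PAD node into the explicit H/W padding attribute of an
// immediately following 2D operation (convolution, depthwise convolution, or
// pooling), removing the PAD from the graph.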
template <typename Attr>
class MergePaddingWith2DOperation : public SequenceTransformation {
public:
explicit MergePaddingWith2DOperation(OperationType operation_type)
: operations_to_match_(
{ToString(OperationType::PAD), ToString(operation_type)}) {}
int ExpectedSequenceLength() const final { return 2; }
TransformResult ApplyToNodesSequence(const std::vector<Node*>& sequence,
GraphFloat32* graph) final {
if (!MatchesByOperationType(sequence, operations_to_match_)) {
return {TransformStatus::SKIPPED, ""};
}
Node* pad_node = sequence.front();
Node* op_node = sequence.back();
PadAttributes pad_attr =
absl::any_cast<PadAttributes>(pad_node->operation.attributes);
if (pad_attr.type != PaddingContentType::ZEROS) {
return {TransformStatus::DECLINED, "Only Zero padding is supported."};
}
if (pad_attr.appended.c != 0 || pad_attr.prepended.c != 0 ||
pad_attr.appended.b != 0 || pad_attr.prepended.b != 0) {
return {TransformStatus::DECLINED,
"Pad has non-zero padding on non HW axis."};
}
Attr* node_attr = absl::any_cast<Attr>(&op_node->operation.attributes);
absl::Status status = RemovePrecedingNode(graph, pad_node, op_node);
if (!status.ok()) {
return {TransformStatus::INVALID,
"Unable to remove Pad node with Operation node: " +
std::string(status.message())};
}
node_attr->padding.appended.h += pad_attr.appended.h;
node_attr->padding.appended.w += pad_attr.appended.w;
node_attr->padding.prepended.h += pad_attr.prepended.h;
node_attr->padding.prepended.w += pad_attr.prepended.w;
return {
TransformStatus::APPLIED,
absl::StrCat("Added padding: prepended = {h = ", pad_attr.prepended.h,
", w = ", pad_attr.prepended.w, "}, appended = { h = ",
pad_attr.appended.h, ", w = ", pad_attr.appended.w, "}")};
}
private:
const std::vector<std::string> operations_to_match_;
};
}  // namespace
std::unique_ptr<SequenceTransformation> NewMergePaddingWithPooling() {
return absl::make_unique<MergePaddingWith2DOperation<Pooling2DAttributes>>(
OperationType::POOLING_2D);
}
std::unique_ptr<SequenceTransformation> NewMergePaddingWithConvolution2D() {
return absl::make_unique<
MergePaddingWith2DOperation<Convolution2DAttributes>>(
OperationType::CONVOLUTION_2D);
}
std::unique_ptr<SequenceTransformation>
NewMergePaddingWithDepthwiseConvolution() {
return absl::make_unique<
MergePaddingWith2DOperation<DepthwiseConvolution2DAttributes>>(
OperationType::DEPTHWISE_CONVOLUTION);
}
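// Drops a PAD that only appends zero channels when it feeds a runtime ADD.
// This only fires when the source channel count is already a multiple of 4,
// where the appended zeros coincide with the implicit alignment padding of
// the GPU's 4-channel tensor layout.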
class MergePaddingWithAddOperation : public NodeTransformation {
public:
TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final {
if (node->operation.type != ToString(OperationType::PAD)) {
return {TransformStatus::SKIPPED, ""};
}
auto inputs = graph->FindInputs(node->id);
if (inputs.size() != 1) {
return {TransformStatus::SKIPPED, ""};
}
const auto& input_shape = graph->FindInputs(node->id)[0]->tensor.shape;
if (input_shape.c % 4 != 0) {
return {TransformStatus::DECLINED,
"Pad with input where src_channels % 4 != 0"};
}
PadAttributes pad_attr =
absl::any_cast<PadAttributes>(node->operation.attributes);
if (pad_attr.type != PaddingContentType::ZEROS) {
return {TransformStatus::DECLINED, "Only Zero padding is supported."};
}
if (pad_attr.prepended != BHWC(0, 0, 0, 0) || pad_attr.appended.h != 0 ||
pad_attr.appended.w != 0 || pad_attr.appended.b != 0) {
return {TransformStatus::DECLINED,
"Pad has padding not only in appended channels axis."};
}
auto pad_output = graph->FindOutputs(node->id)[0];
auto consumer_nodes = graph->FindConsumers(pad_output->id);
if (consumer_nodes.size() != 1) {
return {TransformStatus::SKIPPED, ""};
}
auto add_node = consumer_nodes[0];
auto consumer_type = OperationTypeFromString(add_node->operation.type);
if (consumer_type != OperationType::ADD) {
return {TransformStatus::SKIPPED, ""};
}
ElementwiseAttributes add_attr =
absl::any_cast<ElementwiseAttributes>(add_node->operation.attributes);
const bool is_add_hwc =
absl::holds_alternative<Tensor<HWC, DataType::FLOAT32>>(add_attr.param);
const bool is_add_linear =
absl::holds_alternative<Tensor<Linear, DataType::FLOAT32>>(
add_attr.param);
const bool is_add_scalar = absl::holds_alternative<float>(add_attr.param);
if (is_add_hwc || is_add_linear || is_add_scalar) {
return {TransformStatus::SKIPPED,
"Cannot remove padding when ADD has constant argument."};
}
absl::Status status = RemovePrecedingNode(graph, node, add_node);
if (!status.ok()) {
return {TransformStatus::INVALID,
"Unable to remove Pad node " + std::string(status.message())};
}
return {TransformStatus::APPLIED,
"Removed padding with zeroes in appended channels dimension"};
}
};
std::unique_ptr<NodeTransformation> NewMergePaddingWithAdd() {
return absl::make_unique<MergePaddingWithAddOperation>();
}
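// Usage sketch (as exercised by the MergePaddingWithAdd tests): removes a
// zero PAD on the appended channel axis when its only consumer is an ADD
// without a constant operand.
//
//   auto transformation = NewMergePaddingWithAdd();
//   ModelTransformer transformer(&graph);
//   transformer.Apply("merge_padding", transformation.get());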
}
} | #include "tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with.h"
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
TEST(MergePaddingWith, Smoke) {
GraphFloat32 graph;
auto input = graph.NewValue();
auto pad_node = graph.NewNode();
ASSERT_TRUE(graph.AddConsumer(pad_node->id, input->id).ok());
pad_node->operation.type = ToString(OperationType::PAD);
PadAttributes attr;
attr.prepended = BHWC(0, 1, 1, 0);
attr.appended = BHWC(0, 2, 2, 0);
pad_node->operation.attributes = attr;
auto conv_node = graph.NewNode();
Value* temp = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, pad_node, conv_node, &temp).ok());
ASSERT_TRUE(AddOutput(&graph, conv_node, &temp).ok());
conv_node->operation.type = ToString(OperationType::CONVOLUTION_2D);
Convolution2DAttributes conv_attr;
conv_attr.padding.appended = HW(0, 0);
conv_attr.padding.prepended = HW(0, 0);
conv_node->operation.attributes = conv_attr;
ASSERT_EQ(2, graph.nodes().size());
auto transformation = NewMergePaddingWithConvolution2D();
ModelTransformer transformer(&graph);
transformer.Apply("merge_padding", transformation.get());
ASSERT_EQ(1, graph.nodes().size());
ASSERT_EQ(2, graph.values().size());
ASSERT_EQ(conv_node, graph.nodes()[0]);
conv_attr =
absl::any_cast<Convolution2DAttributes>(conv_node->operation.attributes);
EXPECT_EQ(HW(1, 1), conv_attr.padding.prepended);
EXPECT_EQ(HW(2, 2), conv_attr.padding.appended);
}
TEST(MergePaddingWith, MergeTwo) {
GraphFloat32 graph;
auto input = graph.NewValue();
auto pad_node1 = graph.NewNode();
ASSERT_TRUE(graph.AddConsumer(pad_node1->id, input->id).ok());
pad_node1->operation.type = ToString(OperationType::PAD);
PadAttributes attr;
attr.prepended = BHWC(0, 1, 1, 0);
attr.appended = BHWC(0, 0, 0, 0);
pad_node1->operation.attributes = attr;
auto pad_node2 = graph.NewNode();
Value* temp1 = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, pad_node1, pad_node2, &temp1).ok());
pad_node2->operation.type = ToString(OperationType::PAD);
attr.prepended = BHWC(0, 0, 0, 0);
attr.appended = BHWC(0, 2, 2, 0);
pad_node2->operation.attributes = attr;
auto conv_node = graph.NewNode();
Value* temp2 = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, pad_node2, conv_node, &temp2).ok());
ASSERT_TRUE(AddOutput(&graph, conv_node, &temp2).ok());
conv_node->operation.type = ToString(OperationType::CONVOLUTION_2D);
Convolution2DAttributes conv_attr;
conv_attr.padding.appended = HW(0, 0);
conv_attr.padding.prepended = HW(0, 0);
conv_node->operation.attributes = conv_attr;
ASSERT_EQ(3, graph.nodes().size());
auto transformation = NewMergePaddingWithConvolution2D();
ModelTransformer transformer(&graph);
transformer.Apply("merge_padding", transformation.get());
ASSERT_EQ(1, graph.nodes().size());
ASSERT_EQ(2, graph.values().size());
ASSERT_EQ(conv_node, graph.nodes()[0]);
conv_attr =
absl::any_cast<Convolution2DAttributes>(conv_node->operation.attributes);
EXPECT_EQ(HW(1, 1), conv_attr.padding.prepended);
EXPECT_EQ(HW(2, 2), conv_attr.padding.appended);
}
TEST(MergePaddingWithAdd, MergeAlignedPadding) {
GraphFloat32 graph;
auto input0 = graph.NewValue();
input0->tensor.shape = BHWC(1, 4, 4, 8);
auto input1 = graph.NewValue();
auto padded = graph.NewValue();
auto output = graph.NewValue();
auto pad_node = graph.NewNode();
pad_node->operation.type = ToString(OperationType::PAD);
PadAttributes pad_attr;
pad_attr.prepended = BHWC(0, 0, 0, 0);
pad_attr.appended = BHWC(0, 0, 0, 32);
pad_node->operation.attributes = pad_attr;
ASSERT_TRUE(graph.AddConsumer(pad_node->id, input0->id).ok());
ASSERT_TRUE(graph.SetProducer(pad_node->id, padded->id).ok());
auto add_node = graph.NewNode();
ElementwiseAttributes add_attr;
ASSERT_TRUE(graph.AddConsumer(add_node->id, padded->id).ok());
ASSERT_TRUE(graph.AddConsumer(add_node->id, input1->id).ok());
ASSERT_TRUE(graph.SetProducer(add_node->id, output->id).ok());
add_node->operation.type = ToString(OperationType::ADD);
add_node->operation.attributes = add_attr;
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(4, graph.values().size());
auto transformation = NewMergePaddingWithAdd();
ModelTransformer transformer(&graph);
transformer.Apply("merge_padding", transformation.get());
ASSERT_EQ(1, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
EXPECT_EQ(add_node, graph.nodes()[0]);
}
TEST(MergePaddingWithAdd, DoNotTrigger_AddWithAttributes) {
GraphFloat32 graph;
auto input0 = graph.NewValue();
input0->tensor.shape = BHWC(1, 4, 4, 8);
auto input1 = graph.NewValue();
auto padded = graph.NewValue();
auto output = graph.NewValue();
auto pad_node = graph.NewNode();
pad_node->operation.type = ToString(OperationType::PAD);
PadAttributes pad_attr;
pad_attr.prepended = BHWC(0, 0, 0, 0);
pad_attr.appended = BHWC(0, 0, 0, 32);
pad_node->operation.attributes = pad_attr;
ASSERT_TRUE(graph.AddConsumer(pad_node->id, input0->id).ok());
ASSERT_TRUE(graph.SetProducer(pad_node->id, padded->id).ok());
auto add_node = graph.NewNode();
ElementwiseAttributes add_attr;
add_attr.param = Tensor<HWC, DataType::FLOAT32>();
ASSERT_TRUE(graph.AddConsumer(add_node->id, padded->id).ok());
ASSERT_TRUE(graph.AddConsumer(add_node->id, input1->id).ok());
ASSERT_TRUE(graph.SetProducer(add_node->id, output->id).ok());
add_node->operation.type = ToString(OperationType::ADD);
add_node->operation.attributes = add_attr;
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(4, graph.values().size());
auto transformation = NewMergePaddingWithAdd();
ModelTransformer transformer(&graph);
transformer.Apply("merge_padding", transformation.get());
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(4, graph.values().size());
EXPECT_EQ(pad_node, graph.nodes()[0]);
EXPECT_EQ(add_node, graph.nodes()[1]);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ce81a04e-fca4-4ddf-96e3-daf1ae6b533d | cpp | tensorflow/tensorflow | traceme_recorder | third_party/xla/xla/tsl/profiler/backends/cpu/traceme_recorder.cc | third_party/xla/xla/tsl/profiler/backends/cpu/traceme_recorder_test.cc | #include "xla/tsl/profiler/backends/cpu/traceme_recorder.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <atomic>
#include <deque>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/tsl/profiler/utils/lock_free_queue.h"
#include "xla/tsl/profiler/utils/per_thread.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace profiler {
namespace internal {
#ifdef _WIN32
#define DECL_DLL_EXPORT __declspec(dllexport)
#else
#define DECL_DLL_EXPORT
#endif
DECL_DLL_EXPORT std::atomic<int> g_trace_level(
TraceMeRecorder::kTracingDisabled);
static_assert(ATOMIC_INT_LOCK_FREE == 2, "Assumed atomic<int> was lock free");
}
namespace {
// Tracks half events created by ActivityStart and merges their data into the
// matching half events created by ActivityEnd. A start/end pair is matched by
// activity id; pairs that end on a different thread than they started on are
// resolved in a final cross-thread pass.
class SplitEventTracker {
public:
void AddStart(TraceMeRecorder::Event&& event) {
DCHECK(event.IsStart());
start_events_.emplace(event.ActivityId(), std::move(event));
}
void AddEnd(TraceMeRecorder::Event* event) {
DCHECK(event->IsEnd());
if (!FindStartAndMerge(event)) {
end_events_.push_back(event);
}
}
void HandleCrossThreadEvents() {
for (auto* event : end_events_) {
FindStartAndMerge(event);
}
}
private:
bool FindStartAndMerge(TraceMeRecorder::Event* event) {
auto iter = start_events_.find(event->ActivityId());
if (iter == start_events_.end()) return false;
auto& start_event = iter->second;
event->name = std::move(start_event.name);
event->start_time = start_event.start_time;
start_events_.erase(iter);
return true;
}
absl::flat_hash_map<int64_t, TraceMeRecorder::Event> start_events_;
std::vector<TraceMeRecorder::Event*> end_events_;
};
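// Illustration (not part of the original file): a TraceMe whose ActivityStart
// and ActivityEnd run on different threads produces two half events sharing
// an activity id. If AddEnd cannot find the start half in the same thread's
// queue, the end half is parked in end_events_ and merged later by
// HandleCrossThreadEvents(), once every thread's queue has been drained.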
class ThreadLocalRecorder {
public:
ThreadLocalRecorder() {
auto* env = Env::Default();
info_.tid = env->GetCurrentThreadId();
env->GetCurrentThreadName(&info_.name);
}
const TraceMeRecorder::ThreadInfo& Info() const { return info_; }
void Record(TraceMeRecorder::Event&& event) { queue_.Push(std::move(event)); }
void Clear() { queue_.Clear(); }
TF_MUST_USE_RESULT std::deque<TraceMeRecorder::Event> Consume(
SplitEventTracker* split_event_tracker) {
std::deque<TraceMeRecorder::Event> events;
std::optional<TraceMeRecorder::Event> event;
while ((event = queue_.Pop())) {
if (event->IsStart()) {
split_event_tracker->AddStart(*std::move(event));
continue;
}
events.push_back(*std::move(event));
if (events.back().IsEnd()) {
split_event_tracker->AddEnd(&events.back());
}
}
return events;
}
private:
TraceMeRecorder::ThreadInfo info_;
LockFreeQueue<TraceMeRecorder::Event> queue_;
};
}
void TraceMeRecorder::Clear() {
auto recorders = PerThread<ThreadLocalRecorder>::StartRecording();
for (auto& recorder : recorders) {
recorder->Clear();
  }
}
TraceMeRecorder::Events TraceMeRecorder::Consume() {
TraceMeRecorder::Events result;
SplitEventTracker split_event_tracker;
auto recorders = PerThread<ThreadLocalRecorder>::StopRecording();
for (auto& recorder : recorders) {
auto events = recorder->Consume(&split_event_tracker);
if (!events.empty()) {
result.push_back({recorder->Info(), std::move(events)});
}
  }
split_event_tracker.HandleCrossThreadEvents();
return result;
}
bool TraceMeRecorder::Start(int level) {
level = std::max(0, level);
int expected = kTracingDisabled;
bool started = internal::g_trace_level.compare_exchange_strong(
expected, level, std::memory_order_acq_rel);
if (started) {
Clear();
}
return started;
}
void TraceMeRecorder::Record(Event&& event) {
PerThread<ThreadLocalRecorder>::Get().Record(std::move(event));
}
TraceMeRecorder::Events TraceMeRecorder::Stop() {
TraceMeRecorder::Events events;
if (internal::g_trace_level.exchange(
kTracingDisabled, std::memory_order_acq_rel) != kTracingDisabled) {
events = Consume();
}
return events;
}
int64_t TraceMeRecorder::NewActivityId() {
  // To avoid contention over a global counter, the top 32 bits identify the
  // originating thread and the bottom 32 bits count events within that
  // thread; per-thread ids may repeat after 2^32 events.
  static std::atomic<int32> thread_counter(1);
  const thread_local static int32_t thread_id =
      thread_counter.fetch_add(1, std::memory_order_relaxed);
  thread_local static uint32 per_thread_activity_id = 0;
  return static_cast<int64_t>(thread_id) << 32 | per_thread_activity_id++;
}
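// Resulting id layout (illustrative):
//
//   bits 63..32: thread_id (1, 2, 3, ... in order of first use per thread)
//   bits 31..0 : per_thread_activity_id (starts at 0, wraps after 2^32)
//
// e.g. the third event recorded by the second traced thread gets id
// (2 << 32) | 2.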
}
} | #include "xla/tsl/profiler/backends/cpu/traceme_recorder.h"
#include <atomic>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "xla/tsl/profiler/utils/math_utils.h"
#include "xla/tsl/profiler/utils/time_utils.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/notification.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace profiler {
namespace {
using ::testing::ElementsAre;
MATCHER_P(Named, name, "") { return arg.name == name; }
TEST(RecorderTest, SingleThreaded) {
int64_t start_time = GetCurrentTimeNanos();
int64_t end_time = start_time + UniToNano(1);
TraceMeRecorder::Record({"before", start_time, end_time});
TraceMeRecorder::Start(1);
TraceMeRecorder::Record({"during1", start_time, end_time});
TraceMeRecorder::Record({"during2", start_time, end_time});
auto results = TraceMeRecorder::Stop();
TraceMeRecorder::Record({"after", start_time, end_time});
ASSERT_EQ(results.size(), 1);
EXPECT_THAT(results[0].events,
ElementsAre(Named("during1"), Named("during2")));
}
TEST(RecorderTest, Multithreaded) {
constexpr static int kNumThreads = 4;
tsl::Notification start;
tsl::Notification stop;
thread::ThreadPool pool(tsl::Env::Default(), "testpool", kNumThreads);
std::atomic<int> thread_count = {0};
for (int i = 0; i < kNumThreads; i++) {
pool.Schedule([&start, &stop, &thread_count] {
uint64 j = 0;
bool was_active = false;
auto record_event = [&j]() {
int64_t start_time = GetCurrentTimeNanos();
int64_t end_time = start_time + UniToNano(1);
TraceMeRecorder::Record(
{absl::StrCat(j++), start_time, end_time});
};
thread_count.fetch_add(1, std::memory_order_relaxed);
start.WaitForNotification();
while (!stop.HasBeenNotified()) {
if (TraceMeRecorder::Active()) {
record_event();
was_active = true;
}
if (was_active && !TraceMeRecorder::Active()) {
record_event();
record_event();
was_active = false;
}
SpinForNanos(10);
}
});
}
struct ThreadState {
bool split_session = false;
bool overlapping_sessions = false;
std::set<uint64> events;
};
  absl::flat_hash_map<uint32, ThreadState> thread_state;
auto done = [&thread_state] {
for (const auto& id_and_thread : thread_state) {
auto& t = id_and_thread.second;
if (t.events.size() < 2) return false;
}
return true;
};
while (thread_count.load(std::memory_order_relaxed) < kNumThreads) {
LOG(INFO) << "Waiting for all threads to spin up...";
SleepForMillis(1);
}
start.Notify();
constexpr static int kMaxIters = 100;
for (int iters = 0; iters < kMaxIters && !done(); ++iters) {
LOG(INFO) << "Looping until convergence, iteration: " << iters;
TraceMeRecorder::Start(1);
SleepForMillis(100);
auto results = TraceMeRecorder::Stop();
for (const auto& thread : results) {
if (thread.events.empty()) continue;
auto& state = thread_state[thread.thread.tid];
std::set<uint64> session_events;
uint64 current = 0;
for (const auto& event : thread.events) {
uint64 activity_id;
ASSERT_TRUE(absl::SimpleAtoi(event.name, &activity_id));
session_events.emplace(activity_id);
if (current != 0 && activity_id != current + 1) {
state.split_session = true;
}
current = activity_id;
}
for (const auto& event : session_events) {
auto result = state.events.emplace(event);
if (!result.second) {
state.overlapping_sessions = true;
}
}
}
SleepForMillis(1);
}
stop.Notify();
for (const auto& id_and_thread : thread_state) {
auto& thread = id_and_thread.second;
EXPECT_FALSE(thread.split_session)
<< "Expected contiguous events in a session";
EXPECT_FALSE(thread.overlapping_sessions) << "Expected disjoint sessions";
EXPECT_GT(thread.events.size(), 1)
<< "Expected gaps in thread events between sessions";
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/backends/cpu/traceme_recorder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/backends/cpu/traceme_recorder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
122e2de0-23d3-441b-a4e9-2765e88d32d1 | cpp | tensorflow/tensorflow | topk_kernel | third_party/xla/xla/service/gpu/kernels/topk_kernel.cc | third_party/xla/xla/service/gpu/kernels/topk_kernel_test.cc | #include "xla/service/gpu/kernels/topk_kernel.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/kernels/topk_kernel_common.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/typed_kernel_factory.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
// Chooses the per-block thread count for the TopK kernel launch.
size_t NumThreads(size_t n, size_t k, size_t batch_size) {
  // Estimate how many threads fit in a block: each thread tracks its own
  // top-k candidates, so larger k means fewer threads.
  size_t simultaneous_threads_per_block = 512 * (16 / k);
  size_t threads_per_block =
      std::min(simultaneous_threads_per_block, kTopKMaxThreadsPerBlock);
  // Cap at the number of power-of-two-aligned k-sized slices of the input,
  // so that every thread owns at least one slice.
  size_t min_slice = absl::bit_floor(n / absl::bit_ceil(k));
  return std::min(threads_per_block, min_slice);
}
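// Worked example (values illustrative; kTopKMaxThreadsPerBlock is defined in
// topk_kernel_common.h): for n = 16384 and k = 8,
//   simultaneous_threads_per_block = 512 * (16 / 8) = 1024
//   min_slice = bit_floor(16384 / bit_ceil(8)) = bit_floor(2048) = 2048
// so the launch uses min(1024, kTopKMaxThreadsPerBlock, 2048) threads.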
template <typename T>
absl::StatusOr<void*> GetKernel(int n, int k) {
if (k <= 1) return GetTopKKernelForK<T, 1>(n);
if (k <= 2) return GetTopKKernelForK<T, 2>(n);
if (k <= 4) return GetTopKKernelForK<T, 4>(n);
if (k <= 8) return GetTopKKernelForK<T, 8>(n);
if (k <= 16) return GetTopKKernelForK<T, 16>(n);
return absl::UnimplementedError(absl::StrCat("Unsupported K: ", k));
}
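// Dispatch example (illustrative): GetKernel<float>(/*n=*/1024, /*k=*/7)
// returns the k <= 8 specialization, i.e. k is effectively rounded up to the
// next supported size; any k > 16 yields UnimplementedError.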
template <typename T>
absl::Status TypedTopK(se::Stream* stream, se::DeviceMemoryBase data,
size_t num_elements, se::DeviceMemoryBase top_elements,
se::DeviceMemoryBase top_indices, size_t k,
size_t batch_size) {
constexpr size_t max_kv_size = sizeof(uint64_t);
int shmem_size = absl::bit_ceil(k) * max_kv_size * GetTopKWaveFrontSize<T>();
int num_threads = NumThreads(num_elements, k, batch_size);
if (num_threads == 0) {
return absl::FailedPreconditionError(
"Invalid kernel parameters. This is likely a bug in the "
"TopkSpecializer.");
}
se::StreamExecutor* executor = stream->parent();
se::DeviceMemory<T> data_typed(data);
se::DeviceMemory<T> top_elements_typed(top_elements);
se::DeviceMemory<uint32_t> top_indices_typed(top_indices);
TF_ASSIGN_OR_RETURN(void* kernel_symbol, GetKernel<T>(num_elements, k));
TF_ASSIGN_OR_RETURN(
auto kernel,
(se::TypedKernelFactory<se::DeviceMemory<T>, size_t, se::DeviceMemory<T>,
se::DeviceMemory<uint32_t>,
size_t>::Create(executor, "topk",
kernel_symbol)));
TF_RETURN_IF_ERROR(stream->ThenLaunch(
se::ThreadDim(num_threads, 1, 1), se::BlockDim(batch_size, 1, 1),
shmem_size, kernel, data_typed, num_elements, top_elements_typed,
top_indices_typed, k));
return absl::OkStatus();
}
}
absl::Status RunTopk(se::Stream* stream, PrimitiveType dtype,
se::DeviceMemoryBase data, size_t num_elements,
se::DeviceMemoryBase top_elements,
se::DeviceMemoryBase top_indices, size_t k,
size_t batch_size) {
VLOG(2) << "TopK: " << primitive_util::LowercasePrimitiveTypeName(dtype)
<< ", n: " << num_elements << ", k: " << k << ", bs: " << batch_size;
switch (dtype) {
case PrimitiveType::F32:
return TypedTopK<float>(stream, data, num_elements, top_elements,
top_indices, k, batch_size);
case PrimitiveType::BF16:
return TypedTopK<bfloat16>(stream, data, num_elements, top_elements,
top_indices, k, batch_size);
default:
return absl::UnimplementedError("GpuTopK not implemented for this dtype");
}
}
} | #include "xla/service/gpu/kernels/topk_kernel.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <functional>
#include <tuple>
#include <vector>
#include "absl/log/check.h"
#include "absl/random/random.h"
#include "absl/strings/substitute.h"
#include "absl/time/time.h"
#include "xla/stream_executor/device_memory_handle.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace xla::gpu {
namespace {
using se::gpu::GpuStreamHandle;
using ::testing::Combine;
using ::testing::Values;
template <typename T>
std::vector<T> RandomVecRange(int num_elements, T start, T end) {
std::vector<T> local;
local.reserve(num_elements);
thread_local absl::BitGen gen;
for (int i = 0; i < num_elements; ++i) {
local.push_back(absl::Uniform<T>(gen, start, end));
}
return local;
}
template <typename T>
std::vector<T> RandomVec(int num_elements) {
return RandomVecRange(num_elements, static_cast<T>(0),
static_cast<T>(num_elements));
}
template <typename T>
std::vector<T> RandomVecNegative(int num_elements) {
return RandomVecRange(num_elements, -static_cast<T>(num_elements),
static_cast<T>(0));
}
PrimitiveType Get(float) { return PrimitiveType::F32; }
se::StreamExecutor* GetGpuExecutor() {
auto* platform =
se::PlatformManager::PlatformWithName(se::GpuPlatformName()).value();
return platform->ExecutorForDevice(0).value();
}
using TopkTest = ::testing::TestWithParam<std::tuple<int, int, int, int>>;
TEST_P(TopkTest, TopKFloat) {
using T = float;
auto* executor = GetGpuExecutor();
auto stream = executor->CreateStream().value();
const auto [n_kb, k, batch_size, offset] = GetParam();
const size_t n = n_kb * 1024 + offset;
stream_executor::DeviceMemoryHandle input_buffer(
executor, executor->AllocateArray<T>(n * batch_size));
stream_executor::DeviceMemoryHandle output_values(
executor, executor->AllocateArray<T>(k * batch_size));
stream_executor::DeviceMemoryHandle output_indices(
executor, executor->AllocateArray<uint32_t>(k * batch_size));
ASSERT_TRUE(!(input_buffer.memory().is_null() ||
output_values.memory().is_null() ||
output_indices.memory().is_null()));
auto source = RandomVec<T>(n * batch_size);
CHECK_OK(stream->Memcpy(input_buffer.memory_ptr(), source.data(),
n * batch_size * sizeof(T)));
ASSERT_TRUE(RunTopk(stream.get(), Get(T()), input_buffer.memory(), n,
output_values.memory(), output_indices.memory(), k,
batch_size)
.ok());
std::vector<T> got(k);
ASSERT_TRUE(stream->BlockHostUntilDone().ok());
for (int i = 0; i < batch_size; i++) {
CHECK_OK(stream->Memcpy(
got.data(),
se::DeviceMemory<T>(output_values.memory()).GetSlice(k * i, k),
k * sizeof(T)));
std::vector<T> slice(source.data() + n * i, source.data() + n * (i + 1));
std::sort(slice.begin(), slice.end(), std::greater<T>());
slice.resize(k);
EXPECT_THAT(got, ::testing::ElementsAreArray(slice))
<< " k=" << k << ", batch_size=" << batch_size << " i=" << i;
}
}
TEST_P(TopkTest, TopKPackedNegative) {
using T = float;
auto* executor = GetGpuExecutor();
auto stream = executor->CreateStream().value();
const auto [n_kb, k, batch_size, offset] = GetParam();
const size_t n = n_kb * 1024 + offset;
stream_executor::DeviceMemoryHandle input_buffer(
executor, executor->AllocateArray<T>(n * batch_size));
stream_executor::DeviceMemoryHandle output_values(
executor, executor->AllocateArray<T>(k * batch_size));
stream_executor::DeviceMemoryHandle output_indices(
executor, executor->AllocateArray<uint32_t>(k * batch_size));
ASSERT_TRUE(!(input_buffer.memory().is_null() ||
output_values.memory().is_null() ||
output_indices.memory().is_null()));
auto source = RandomVecNegative<T>(n * batch_size);
CHECK_OK(stream->Memcpy(input_buffer.memory_ptr(), source.data(),
n * batch_size * sizeof(T)));
ASSERT_TRUE(RunTopk(stream.get(), Get(T()), input_buffer.memory(), n,
output_values.memory(), output_indices.memory(), k,
batch_size)
.ok());
std::vector<T> got(k);
ASSERT_TRUE(stream->BlockHostUntilDone().ok());
for (int i = 0; i < batch_size; i++) {
CHECK_OK(stream->Memcpy(
got.data(),
se::DeviceMemory<T>(output_values.memory()).GetSlice(k * i, k),
k * sizeof(T)));
std::vector<T> slice(source.data() + n * i, source.data() + n * (i + 1));
std::sort(slice.begin(), slice.end(), std::greater<T>());
slice.resize(k);
EXPECT_THAT(got, ::testing::ElementsAreArray(slice))
<< " k=" << k << ", batch_size=" << batch_size << " i=" << i;
}
}
INSTANTIATE_TEST_SUITE_P(TopkTests, TopkTest,
Combine(
Values(1, 8, 12, 64, 128),
Values(1, 2, 8, 16, 7, 12),
Values(1, 16, 64, 128),
Values(0, 7, 4)),
[](const auto& info) {
return absl::Substitute(
"n$0KiB_k$1_batch_size$2_offset$3",
std::get<0>(info.param), std::get<1>(info.param),
std::get<2>(info.param),
std::get<3>(info.param));
});
template <size_t K>
void BM_SmallTopk(benchmark::State& state) {
using T = float;
size_t k = K;
size_t batch_size = state.range(0);
size_t n = state.range(1) * 1024;
state.SetLabel(
absl::Substitute("n=$0Ki k=$1 batch_size=$2", n / 1024, k, batch_size));
auto* executor = GetGpuExecutor();
auto stream = executor->CreateStream().value();
stream_executor::DeviceMemoryHandle input_buffer(
executor, executor->AllocateArray<T>(n * batch_size));
stream_executor::DeviceMemoryHandle output_values(
executor, executor->AllocateArray<T>(k * batch_size));
stream_executor::DeviceMemoryHandle output_indices(
executor, executor->AllocateArray<uint32_t>(k * batch_size));
if (input_buffer.memory().is_null() || output_values.memory().is_null() ||
output_indices.memory().is_null()) {
state.SkipWithError("Unable to allocate GPU memory: aborting benchmark");
return;
}
auto source = RandomVec<T>(n);
for (size_t i = 0; i < batch_size; i++) {
auto slice = se::DeviceMemory<T>(input_buffer.memory()).GetSlice(i * n, n);
CHECK_OK(stream->Memcpy(&slice, source.data(), n * sizeof(T)));
}
for (auto _ : state) {
CHECK_OK(RunTopk(stream.get(), Get(T()), input_buffer.memory(), n,
output_values.memory(), output_indices.memory(), k,
batch_size));
TF_ASSERT_OK_AND_ASSIGN(auto timer, stream->CreateEventBasedTimer(true));
CHECK_OK(RunTopk(stream.get(), Get(T()), input_buffer.memory(), n,
output_values.memory(), output_indices.memory(), k,
batch_size));
auto timer_duration = timer->GetElapsedDuration();
CHECK_OK(timer_duration.status());
state.SetIterationTime(absl::ToDoubleSeconds(timer_duration.value()));
}
size_t items_processed = batch_size * n * state.iterations();
state.SetItemsProcessed(items_processed);
state.SetBytesProcessed(items_processed * sizeof(T));
}
BENCHMARK(BM_SmallTopk<1>)->RangePair(1, 1024, 16, 1024)->UseManualTime();
BENCHMARK(BM_SmallTopk<2>)->RangePair(1, 1024, 16, 1024)->UseManualTime();
BENCHMARK(BM_SmallTopk<4>)->RangePair(1, 1024, 16, 1024)->UseManualTime();
BENCHMARK(BM_SmallTopk<8>)->RangePair(1, 1024, 16, 1024)->UseManualTime();
BENCHMARK(BM_SmallTopk<16>)->RangePair(1, 1024, 16, 1024)->UseManualTime();
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernels/topk_kernel.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernels/topk_kernel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c4f6349c-d229-4b92-b60f-2e85745f5542 | cpp | google/langsvr | builder | include/langsvr/json/builder.h | src/json/builder_test.cc | #ifndef LANGSVR_JSON_BUILDER_
#define LANGSVR_JSON_BUILDER_
#include <memory>
#include <type_traits>
#include "langsvr/json/value.h"
#include "langsvr/span.h"
#include "langsvr/traits.h"
namespace langsvr::json {
class Value;
}
namespace langsvr::json {
class Builder {
public:
virtual ~Builder();
static std::unique_ptr<Builder> Create();
virtual Result<const Value*> Parse(std::string_view json) = 0;
virtual const Value* Null() = 0;
virtual const Value* Bool(json::Bool value) = 0;
virtual const Value* I64(json::I64 value) = 0;
virtual const Value* U64(json::U64 value) = 0;
virtual const Value* F64(json::F64 value) = 0;
virtual const Value* String(json::String value) = 0;
const Value* String(std::string_view value) { return String(json::String(value)); }
const Value* String(const char* value) { return String(json::String(value)); }
virtual const Value* Array(Span<const Value*> elements) = 0;
struct Member {
json::String name;
const Value* value;
};
virtual const Value* Object(Span<Member> members) = 0;
    // Creates a json::Value from a C++ value, dispatching on its static type:
    // json::Bool -> Bool, signed integral -> I64, unsigned integral -> U64,
    // floating point -> F64, string-like -> String.
    template <typename T>
    auto Create(T&& value) {
static constexpr bool is_bool = std::is_same_v<T, json::Bool>;
static constexpr bool is_i64 = std::is_integral_v<T> && std::is_signed_v<T>;
static constexpr bool is_u64 = std::is_integral_v<T> && std::is_unsigned_v<T>;
static constexpr bool is_f64 = std::is_floating_point_v<T>;
static constexpr bool is_string = IsStringLike<T>;
static_assert(is_bool || is_i64 || is_u64 || is_f64 || is_string);
if constexpr (is_bool) {
return Bool(std::forward<T>(value));
} else if constexpr (is_i64) {
return I64(static_cast<json::I64>(std::forward<T>(value)));
} else if constexpr (is_u64) {
return U64(static_cast<json::U64>(std::forward<T>(value)));
} else if constexpr (is_f64) {
return F64(static_cast<json::F64>(std::forward<T>(value)));
} else if constexpr (is_string) {
return String(std::forward<T>(value));
}
}
};
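// Usage sketch (a minimal example mirroring the builder unit tests):
//
//   auto b = Builder::Create();
//   std::vector<Builder::Member> members{
//       {"cat", b->String("meow")},
//       {"ten", b->I64(10)},
//   };
//   const Value* obj = b->Object(members);  // {"cat":"meow","ten":10}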
}
#endif | #include "langsvr/json/builder.h"
#include "gtest/gtest.h"
#include "src/utils/replace_all.h"
namespace langsvr::json {
namespace {
TEST(JsonBuilder, ParseNull) {
auto b = Builder::Create();
auto null_res = b->Parse("null");
ASSERT_EQ(null_res, Success);
auto& null = null_res.Get();
EXPECT_EQ(null->Kind(), json::Kind::kNull);
EXPECT_EQ(null->Json(), "null");
}
TEST(JsonBuilder, ParseBool) {
auto b = Builder::Create();
auto true_res = b->Parse("true");
ASSERT_EQ(true_res, Success);
auto& true_ = true_res.Get();
EXPECT_EQ(true_->Bool(), true);
EXPECT_EQ(true_->Kind(), json::Kind::kBool);
EXPECT_EQ(true_->Json(), "true");
}
TEST(JsonBuilder, ParseI64) {
auto b = Builder::Create();
auto i64_res = b->Parse("9223372036854775807");
ASSERT_EQ(i64_res, Success);
auto& i64 = i64_res.Get();
EXPECT_EQ(i64->I64(), static_cast<json::I64>(9223372036854775807));
EXPECT_EQ(i64->Kind(), json::Kind::kI64);
EXPECT_EQ(i64->Json(), "9223372036854775807");
}
TEST(JsonBuilder, ParseU64) {
auto b = Builder::Create();
auto u64_res = b->Parse("9223372036854775808");
ASSERT_EQ(u64_res, Success);
auto& u64 = u64_res.Get();
EXPECT_EQ(u64->U64(), static_cast<json::U64>(9223372036854775808u));
EXPECT_EQ(u64->Kind(), json::Kind::kU64);
EXPECT_EQ(u64->Json(), "9223372036854775808");
}
TEST(JsonBuilder, ParseF64) {
auto b = Builder::Create();
auto f64_res = b->Parse("42.0");
ASSERT_EQ(f64_res, Success);
auto& f64 = f64_res.Get();
EXPECT_EQ(f64->F64().Get(), 42.0);
EXPECT_EQ(f64->Kind(), json::Kind::kF64);
EXPECT_EQ(f64->Json(), "42.0");
}
TEST(JsonBuilder, ParseString) {
auto b = Builder::Create();
auto string_res = b->Parse("\"hello world\"");
ASSERT_EQ(string_res, Success);
auto& string_ = string_res.Get();
EXPECT_EQ(string_->String(), "hello world");
EXPECT_EQ(string_->Kind(), json::Kind::kString);
EXPECT_EQ(string_->Json(), "\"hello world\"");
}
TEST(JsonBuilder, ParseArray) {
auto b = Builder::Create();
auto arr_res = b->Parse("[10, false, \"fish\" ]");
ASSERT_EQ(arr_res, Success);
auto& arr = arr_res.Get();
EXPECT_EQ(arr->Kind(), json::Kind::kArray);
EXPECT_EQ(ReplaceAll(arr->Json(), " ", ""), "[10,false,\"fish\"]");
EXPECT_EQ(arr->Count(), 3u);
EXPECT_EQ(arr->Get<json::I64>(0u), static_cast<json::I64>(10));
EXPECT_EQ(arr->Get<json::Bool>(1u), false);
EXPECT_EQ(arr->Get<json::String>(2u), "fish");
auto oob = arr->Get(3);
EXPECT_NE(oob, Success);
}
TEST(JsonBuilder, ParseObject) {
auto b = Builder::Create();
auto root_res = b->Parse(R"({"cat": "meow", "ten": 10, "yes": true})");
ASSERT_EQ(root_res, Success);
auto& root = root_res.Get();
EXPECT_EQ(root->Kind(), json::Kind::kObject);
EXPECT_EQ(ReplaceAll(root->Json(), " ", ""), R"({"cat":"meow","ten":10,"yes":true})");
EXPECT_EQ(root->Count(), 3u);
EXPECT_EQ(root->Get<json::String>("cat"), "meow");
EXPECT_EQ(root->Get<json::I64>("ten"), static_cast<json::I64>(10));
EXPECT_EQ(root->Get<json::Bool>("yes"), true);
auto missing = root->Get("missing");
EXPECT_NE(missing, Success);
}
TEST(JsonBuilder, CreateNull) {
auto b = Builder::Create();
auto v = b->Null();
EXPECT_EQ(v->Kind(), json::Kind::kNull);
EXPECT_EQ(v->Json(), "null");
}
TEST(JsonBuilder, CreateBool) {
auto b = Builder::Create();
auto v = b->Bool(true);
EXPECT_EQ(v->Kind(), json::Kind::kBool);
EXPECT_EQ(v->Json(), "true");
}
TEST(JsonBuilder, CreateI64) {
auto b = Builder::Create();
auto v = b->I64(static_cast<json::I64>(9223372036854775807));
EXPECT_EQ(v->Kind(), json::Kind::kI64);
EXPECT_EQ(v->Json(), "9223372036854775807");
}
TEST(JsonBuilder, CreateU64) {
auto b = Builder::Create();
auto v = b->U64(static_cast<json::U64>(9223372036854775808ul));
EXPECT_EQ(v->Kind(), json::Kind::kU64);
EXPECT_EQ(v->Json(), "9223372036854775808");
}
TEST(JsonBuilder, CreateF64) {
auto b = Builder::Create();
auto v = b->F64(static_cast<json::F64>(42.0));
EXPECT_EQ(v->Kind(), json::Kind::kF64);
EXPECT_EQ(v->Json(), "42.0");
}
TEST(JsonBuilder, CreateString) {
auto b = Builder::Create();
auto v = b->String("hello world");
EXPECT_EQ(v->Kind(), json::Kind::kString);
EXPECT_EQ(v->Json(), "\"hello world\"");
}
TEST(JsonBuilder, CreateArray) {
auto b = Builder::Create();
std::vector elements{
b->I64(10),
b->Bool(false),
b->String("fish"),
};
auto v = b->Array(elements);
EXPECT_EQ(v->Kind(), json::Kind::kArray);
EXPECT_EQ(ReplaceAll(v->Json(), " ", ""), R"([10,false,"fish"])");
}
TEST(JsonBuilder, CreateObject) {
auto b = Builder::Create();
std::vector members{
Builder::Member{"cat", b->String("meow")},
Builder::Member{"ten", b->I64(10)},
Builder::Member{"yes", b->Bool(true)},
};
auto v = b->Object(members);
EXPECT_EQ(v->Kind(), json::Kind::kObject);
EXPECT_EQ(ReplaceAll(v->Json(), " ", ""), R"({"cat":"meow","ten":10,"yes":true})");
}
}
} | https://github.com/google/langsvr/blob/303c526231a90049a3e384549720f3fbd453cf66/include/langsvr/json/builder.h | https://github.com/google/langsvr/blob/303c526231a90049a3e384549720f3fbd453cf66/src/json/builder_test.cc | 303c526231a90049a3e384549720f3fbd453cf66 |
f8fa38a8-c603-4c99-9ab0-8297cb05f5a3 | cpp | tensorflow/tensorflow | delegate | tensorflow/lite/delegates/flex/delegate.cc | tensorflow/lite/delegates/flex/delegate_test.cc | #include "tensorflow/lite/delegates/flex/delegate.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/macros.h"
#include "tensorflow/lite/delegates/flex/buffer_map.h"
#include "tensorflow/lite/delegates/flex/kernel.h"
#include "tensorflow/lite/delegates/flex/util.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
TfLiteDelegateUniquePtr FlexDelegate::Create(
std::unique_ptr<FlexDelegate> base_delegate) {
TFLITE_LOG_PROD_ONCE(TFLITE_LOG_INFO,
"Created TensorFlow Lite delegate for select TF ops.");
if (base_delegate == nullptr) {
base_delegate.reset(new FlexDelegate());
}
auto flex_delegate = TfLiteDelegateFactory::Create(std::move(base_delegate));
flex_delegate->flags |= kTfLiteDelegateFlagsAllowDynamicTensors;
flex_delegate->flags |= kTfLiteDelegateFlagsPerOperatorProfiling;
reinterpret_cast<FlexDelegate*>(flex_delegate->data_)->base_delegate_ =
flex_delegate.get();
return flex_delegate;
}
TfLiteStatus FlexDelegate::Initialize(TfLiteContext* context) {
tensorflow::SessionOptions session_options;
session_options.config.set_inter_op_parallelism_threads(-1);
if (context->recommended_num_threads > 0) {
session_options.config.set_intra_op_parallelism_threads(
context->recommended_num_threads);
}
auto status = delegate_data_.Prepare(
session_options, reinterpret_cast<Subgraph*>(context->impl_),
base_delegate_);
if (!status.ok()) {
TF_LITE_KERNEL_LOG(context, "Failed to initialize TensorFlow context: %s",
absl::StatusMessageAsCStr(status));
return kTfLiteError;
}
if (!cancellation_manager_) {
cancellation_manager_ = std::make_unique<tensorflow::CancellationManager>();
delegate_data_.SetCancellationManager(cancellation_manager_.get());
}
return kTfLiteOk;
}
const char* FlexDelegate::Name() const {
static constexpr char kName[] = "TfLiteFlexDelegate";
return kName;
}
bool FlexDelegate::IsNodeSupportedByDelegate(
const TfLiteRegistration* registration, const TfLiteNode* node,
TfLiteContext* context) const {
return IsFlexOp(registration->custom_name);
}
std::unique_ptr<SimpleDelegateKernelInterface>
FlexDelegate::CreateDelegateKernelInterface() {
return std::unique_ptr<SimpleDelegateKernelInterface>(
new tflite::flex::DelegateKernel());
}
TfLiteStatus FlexDelegate::CopyFromBufferHandle(
TfLiteContext* context, TfLiteBufferHandle buffer_handle,
TfLiteTensor* output) {
flex::BufferMap* buffer_map = delegate_data_.GetBufferMap(context);
if (!buffer_map->HasTensor(buffer_handle)) {
TF_LITE_KERNEL_LOG(context, "Invalid tensor index %d.", buffer_handle);
return kTfLiteError;
}
tensorflow::Tensor t = buffer_map->GetTensor(buffer_handle);
if (output->type == kTfLiteString) {
if (t.dtype() != tensorflow::DT_STRING) {
TF_LITE_KERNEL_LOG(context,
"Inconsistent type for TF string tensor index %d.",
buffer_handle);
return kTfLiteError;
}
DynamicBuffer dynamic_buffer;
auto tf_data = t.flat<tensorflow::tstring>();
for (int i = 0; i < t.NumElements(); ++i) {
dynamic_buffer.AddString(tf_data(i).data(), tf_data(i).size());
}
dynamic_buffer.WriteToTensor(output, nullptr);
return kTfLiteOk;
}
  // Resource and variant tensors are not copied byte-for-byte; the TF Lite
  // tensor instead stores a pointer to the tensorflow::Tensor owned by the
  // buffer map.
  if (IsResourceOrVariant(output)) {
const size_t required_bytes = sizeof(tensorflow::Tensor**);
const tensorflow::Tensor** tf_tensor_ptr =
reinterpret_cast<const tensorflow::Tensor**>(malloc(required_bytes));
*tf_tensor_ptr = buffer_map->GetTensorPtr(buffer_handle);
TfLiteTensorDataFree(output);
output->data.raw = reinterpret_cast<char*>(tf_tensor_ptr);
output->bytes = required_bytes;
output->data_is_stale = true;
return kTfLiteOk;
}
tensorflow::StringPiece t_data = t.tensor_data();
if (output->bytes != t_data.size()) {
TF_LITE_KERNEL_LOG(context,
absl::StrCat("The given ", output->bytes,
" bytes are not enough to store "
"TensorFlow's aligned buffer of size ",
t_data.size(), " bytes.")
.c_str());
return kTfLiteError;
}
memcpy(output->data.raw, t_data.data(), t_data.size());
return kTfLiteOk;
}
void FlexDelegate::Cancel() { cancellation_manager_->StartCancel(); }
bool FlexDelegate::HasCancelled(void* data) {
if (data == nullptr) {
return false;
}
auto* flex_delegate = static_cast<FlexDelegate*>(data);
return flex_delegate->cancellation_manager_->IsCancelled();
}
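// Usage sketch (as wired up in the delegate unit tests): clients that want
// cooperative cancellation register HasCancelled with the interpreter and
// call Cancel() from another thread.
//
//   interpreter->SetCancellationFunction(flex_delegate,
//                                        FlexDelegate::HasCancelled);
//   ...
//   flex_delegate->Cancel();  // in-flight and later Invoke() calls fail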
} | #include "tensorflow/lite/delegates/flex/delegate.h"
#include <chrono>
#include <cstdint>
#include <memory>
#include <thread>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/flex/test_util.h"
#include "tensorflow/lite/shared_library.h"
namespace tflite {
namespace flex {
namespace {
using ::testing::ElementsAre;
class DelegateTest : public testing::FlexModelTest {
public:
DelegateTest() : delegate_(FlexDelegate::Create()) {
flex_delegate_ = static_cast<FlexDelegate*>(delegate_->data_);
interpreter_ = std::make_unique<Interpreter>(&error_reporter_);
}
~DelegateTest() override {
interpreter_.reset();
delegate_.reset();
}
void ConfigureDelegate() {
interpreter_->SetCancellationFunction(flex_delegate_,
FlexDelegate::HasCancelled);
ASSERT_EQ(interpreter_->ModifyGraphWithDelegate(delegate_.get()),
kTfLiteOk);
}
void Cancel() { flex_delegate_->Cancel(); }
private:
std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)> delegate_;
FlexDelegate* flex_delegate_;
};
TEST_F(DelegateTest, FullGraph) {
AddTensors(9, {0, 3}, {8}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfOp(testing::kAdd, {1, 4}, {6});
AddTfOp(testing::kAdd, {2, 5}, {7});
AddTfOp(testing::kMul, {6, 7}, {8});
ConfigureDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(3, {2, 2, 1});
SetValues(3, {1.1f, 2.2f, 3.3f, 4.4f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(8), ElementsAre(2, 1));
ASSERT_THAT(GetValues(8), ElementsAre(14.52f, 38.72f));
ASSERT_EQ(GetType(8), kTfLiteFloat32);
}
TEST_F(DelegateTest, NonFloatTypeInference) {
AddTensors(3, {0, 1}, {2}, kTfLiteInt32, {2});
AddTfOp(testing::kAdd, {0, 1}, {2});
ConfigureDelegate();
SetShape(0, {2, 2});
SetTypedValues<int>(0, {1, 2, 3, 4});
SetShape(1, {2, 2});
SetTypedValues<int>(1, {4, 3, 2, 1});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(2), ElementsAre(2, 2));
ASSERT_THAT(GetTypedValues<int>(2), ElementsAre(5, 5, 5, 5));
ASSERT_EQ(GetType(2), kTfLiteInt32);
}
TEST_F(DelegateTest, StringInference) {
AddTensors(3, {0, 1}, {2}, kTfLiteString, {2});
AddTfOp(testing::kAdd, {0, 1}, {2});
ConfigureDelegate();
SetShape(0, {2, 2});
SetStringValues(0, {"1", "2", "3", "4"});
SetShape(1, {2, 2});
SetStringValues(1, {"4", "3", "2", "1"});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(2), ElementsAre(2, 2));
ASSERT_THAT(GetStringValues(2), ElementsAre("14", "23", "32", "41"));
ASSERT_EQ(GetType(2), kTfLiteString);
}
TEST_F(DelegateTest, MixedGraph) {
AddTensors(9, {0, 3}, {8}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfOp(testing::kAdd, {1, 4}, {6});
AddTfOp(testing::kAdd, {2, 5}, {7});
AddTfLiteMulOp({6, 7}, {8});
ConfigureDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(3, {2, 2, 1});
SetValues(3, {1.1f, 2.2f, 3.3f, 4.4f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(8), ElementsAre(2, 1));
ASSERT_THAT(GetValues(8), ElementsAre(14.52f, 38.72f));
}
TEST_F(DelegateTest, SplitGraph) {
AddTensors(10, {0}, {9}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kAdd, {1, 2}, {3});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfLiteMulOp({4, 5}, {6});
AddTfOp(testing::kUnpack, {6}, {7, 8});
AddTfOp(testing::kAdd, {7, 8}, {9});
ConfigureDelegate();
SetShape(0, {2, 2, 2, 1});
SetValues(0, {3.0f, 1.0f, 0.5f, -1.0f, 0.0f, 1.0f, 1.5f, 3.0f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(9), ElementsAre(1));
ASSERT_THAT(GetValues(9), ElementsAre(10.0f));
}
TEST_F(DelegateTest, OnlyTFLite) {
AddTensors(10, {0, 1}, {2}, kTfLiteFloat32, {3});
AddTfLiteMulOp({0, 1}, {2});
ConfigureDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(1, {2, 2, 1});
SetValues(1, {1.0f, 2.0f, 3.0f, 4.0f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(2), ElementsAre(2, 2, 1));
ASSERT_THAT(GetValues(2), ElementsAre(1.1f, 4.4f, 9.9f, 17.6f));
}
TEST_F(DelegateTest, MultipleInvokeCalls) {
AddTensors(10, {0, 1}, {2}, kTfLiteFloat32, {3});
AddTfLiteMulOp({0, 1}, {2});
ConfigureDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(1, {2, 2, 1});
SetValues(1, {1.0f, 2.0f, 3.0f, 4.0f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(2), ElementsAre(2, 2, 1));
ASSERT_THAT(GetValues(2), ElementsAre(1.1f, 4.4f, 9.9f, 17.6f));
SetShape(0, {2, 2, 1});
SetValues(1, {4.0f, 3.0f, 2.0f, 1.0f});
SetShape(1, {2, 2, 1});
SetValues(0, {4.4f, 3.3f, 2.2f, 1.1f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(2), ElementsAre(2, 2, 1));
ASSERT_THAT(GetValues(2), ElementsAre(17.6f, 9.9f, 4.4f, 1.1f));
}
TEST_F(DelegateTest, MultipleInterpretersSameDelegate) {
{
AddTensors(9, {0, 3}, {8}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfOp(testing::kAdd, {1, 4}, {6});
AddTfOp(testing::kAdd, {2, 5}, {7});
AddTfOp(testing::kMul, {6, 7}, {8});
ConfigureDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(3, {2, 2, 1});
SetValues(3, {1.1f, 2.2f, 3.3f, 4.4f});
}
std::unique_ptr<Interpreter> interpreter(new Interpreter(&error_reporter_));
interpreter_.swap(interpreter);
{
AddTensors(10, {0}, {9}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kAdd, {1, 2}, {3});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfLiteMulOp({4, 5}, {6});
AddTfOp(testing::kUnpack, {6}, {7, 8});
AddTfOp(testing::kAdd, {7, 8}, {9});
ConfigureDelegate();
SetShape(0, {2, 2, 2, 1});
SetValues(0, {3.0f, 1.0f, 0.5f, -1.0f, 0.0f, 1.0f, 1.5f, 3.0f});
}
interpreter_.swap(interpreter);
{
ASSERT_TRUE(Invoke());
EXPECT_THAT(GetShape(8), ElementsAre(2, 1));
EXPECT_THAT(GetValues(8), ElementsAre(14.52f, 38.72f));
}
interpreter_.swap(interpreter);
{
ASSERT_TRUE(Invoke());
EXPECT_THAT(GetShape(9), ElementsAre(1));
EXPECT_THAT(GetValues(9), ElementsAre(10.0f));
}
}
TEST_F(DelegateTest, SingleThreaded) {
AddTensors(9, {0, 3}, {8}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfOp(testing::kAdd, {1, 4}, {6});
AddTfOp(testing::kAdd, {2, 5}, {7});
AddTfOp(testing::kMul, {6, 7}, {8});
interpreter_->SetNumThreads(1);
ConfigureDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(3, {2, 2, 1});
SetValues(3, {1.1f, 2.2f, 3.3f, 4.4f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(8), ElementsAre(2, 1));
ASSERT_THAT(GetValues(8), ElementsAre(14.52f, 38.72f));
ASSERT_EQ(GetType(8), kTfLiteFloat32);
}
TEST_F(DelegateTest, MultiThreaded) {
AddTensors(9, {0, 3}, {8}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfOp(testing::kAdd, {1, 4}, {6});
AddTfOp(testing::kAdd, {2, 5}, {7});
AddTfOp(testing::kMul, {6, 7}, {8});
interpreter_->SetNumThreads(4);
ConfigureDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(3, {2, 2, 1});
SetValues(3, {1.1f, 2.2f, 3.3f, 4.4f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(8), ElementsAre(2, 1));
ASSERT_THAT(GetValues(8), ElementsAre(14.52f, 38.72f));
ASSERT_EQ(GetType(8), kTfLiteFloat32);
}
#if !defined(__ANDROID__)
TEST_F(DelegateTest, TF_AcquireFlexDelegate) {
auto TF_AcquireFlexDelegate =
reinterpret_cast<Interpreter::TfLiteDelegatePtr (*)()>(
SharedLibrary::GetSymbol("TF_AcquireFlexDelegate"));
ASSERT_TRUE(TF_AcquireFlexDelegate);
auto delegate_ptr = TF_AcquireFlexDelegate();
ASSERT_TRUE(delegate_ptr != nullptr);
}
#endif
TEST_F(DelegateTest, StaticOutput) {
AddTensors(7, {0, 1, 2, 3}, {6}, kTfLiteFloat32, {2});
AddTfOp(testing::kAdd, {0, 2}, {4});
AddTfOp(testing::kAdd, {1, 3}, {5});
AddTfOp(testing::kMul, {4, 5}, {6});
ConfigureDelegate();
SetShape(0, {2});
SetShape(1, {2});
SetShape(2, {2});
SetShape(3, {2});
SetValues(0, {1.1f, 2.2f});
SetValues(1, {3.3f, 4.4f});
SetValues(2, {1.1f, 2.2f});
SetValues(3, {3.3f, 4.4f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(6), ElementsAre(2));
ASSERT_THAT(GetValues(6), ElementsAre(14.52f, 38.72f));
ASSERT_EQ(GetType(6), kTfLiteFloat32);
ASSERT_FALSE(IsDynamicTensor(6));
}
TEST_F(DelegateTest, StaticOutputRFFT) {
AddTensors(4, {0, 1}, {3}, kTfLiteFloat32, {3, 257});
int32_t rfft_length[] = {512};
SetConstTensor(1, {1}, kTfLiteInt32,
reinterpret_cast<const char*>(&rfft_length),
sizeof(rfft_length));
AddTfOp(testing::kRfft, {0, 1}, {2});
AddTfOp(testing::kImag, {2}, {3});
ConfigureDelegate();
SetShape(0, {3, 512});
SetValues(0, std::vector<float>(3 * 512, 1.0f));
ASSERT_TRUE(Invoke());
ASSERT_EQ(GetType(3), kTfLiteFloat32);
ASSERT_FALSE(IsDynamicTensor(3));
}
TEST_F(DelegateTest, DynamicOutputAfterReshape) {
AddTensors(9, {0, 3}, {8}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfOp(testing::kAdd, {1, 4}, {6});
AddTfOp(testing::kAdd, {2, 5}, {7});
AddTfOp(testing::kMul, {6, 7}, {8});
ConfigureDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(3, {2, 2, 1});
SetValues(3, {1.1f, 2.2f, 3.3f, 4.4f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(8), ElementsAre(2, 1));
ASSERT_THAT(GetValues(8), ElementsAre(14.52f, 38.72f));
ASSERT_EQ(GetType(8), kTfLiteFloat32);
ASSERT_TRUE(IsDynamicTensor(8));
}
TEST_F(DelegateTest, TestCancellation1) {
AddTensors(3, {0, 1}, {2}, kTfLiteInt32, {2});
AddTfOp(testing::kAdd, {0, 1}, {2});
ConfigureDelegate();
SetShape(0, {2, 2});
SetTypedValues<int>(0, {1, 2, 3, 4});
SetShape(1, {2, 2});
SetTypedValues<int>(1, {4, 3, 2, 1});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(2), ElementsAre(2, 2));
ASSERT_THAT(GetTypedValues<int>(2), ElementsAre(5, 5, 5, 5));
ASSERT_EQ(GetType(2), kTfLiteInt32);
Cancel();
ASSERT_FALSE(Invoke());
EXPECT_EQ(error_reporter_.error_messages(),
"Client requested cancel during Invoke()");
}
TEST_F(DelegateTest, TestCancellation2) {
AddTensors(2, {0}, {1}, kTfLiteBool, {1});
AddTfOp(testing::kLoopCond, {0}, {1});
ConfigureDelegate();
SetShape(0, {1});
ASSERT_TRUE(Invoke());
Cancel();
ASSERT_FALSE(Invoke());
EXPECT_EQ(error_reporter_.error_messages(),
"Client requested cancel during Invoke()");
}
TEST_F(DelegateTest, TestCancellationTwoThreads) {
AddTensors(3, {0, 1}, {2}, kTfLiteInt32, {2});
AddTfOp(testing::kAdd, {0, 1}, {2});
ConfigureDelegate();
SetShape(0, {2, 2});
SetTypedValues<int>(0, {1, 2, 3, 4});
SetShape(1, {2, 2});
SetTypedValues<int>(1, {4, 3, 2, 1});
std::thread invoke_thread([this]() {
bool result = true;
result = this->Invoke();
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
result = this->Invoke();
ASSERT_FALSE(result);
});
std::thread cancel_thread([this]() { this->Cancel(); });
invoke_thread.join();
cancel_thread.join();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/flex/delegate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/flex/delegate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e782c79f-e9a8-483b-9862-b4e5938a0e81 | cpp | tensorflow/tensorflow | reduction_splitter | third_party/xla/xla/service/gpu/transforms/reduction_splitter.cc | third_party/xla/xla/service/gpu/transforms/reduction_splitter_test.cc | #include "xla/service/gpu/transforms/reduction_splitter.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/layout_util.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
class ReductionSplitterVisitor : public DfsHloRewriteVisitor {
public:
explicit ReductionSplitterVisitor(bool ignore_small_dims)
: ignore_small_dims_(ignore_small_dims) {}
absl::Status HandleReduce(HloInstruction *reduce) override {
VLOG(4) << "Input: " << reduce->ToString();
if (IsReductionFromOrToContiguousDimensions(*reduce)) {
VLOG(4) << "Reduction with contiguous dimensions. Return.";
return absl::OkStatus();
}
if (reduce->dimensions().size() < 2) {
return absl::OkStatus();
}
if (!reduce->shape().IsArray()) {
return absl::OkStatus();
}
HloInstruction *operand = reduce->mutable_operand(0);
const Shape &shape = operand->shape();
CHECK(shape == LayoutUtil::GetWithDefaultLayout(shape))
<< "Default layout should be enforced on reduction operand";
for (int64_t i = 0; i < reduce->dimensions().size(); ++i) {
for (int64_t j = i + 1; j < reduce->dimensions().size(); ++j) {
CHECK(abs(reduce->dimensions(i) - reduce->dimensions(j)) > 1)
<< "Reduction dimensions must not be consecutive";
}
}
    // Split at the largest reduction dimension: that gives the inner
    // reduction the bulk of the work and leaves a cheap outer reduction.
    int64_t max_shape_dim = 0;
int64_t max_reduce_dim = 0;
const auto &input_shape = reduce->operand(0)->shape();
for (int64_t i = 0; i < reduce->dimensions().size(); ++i) {
if (input_shape.dimensions(reduce->dimensions(i)) > max_shape_dim) {
max_reduce_dim = reduce->dimensions(i);
max_shape_dim = input_shape.dimensions(max_reduce_dim);
}
}
if (ignore_small_dims_ && max_shape_dim <= 8) {
return absl::OkStatus();
}
VLOG(3) << "Splitting reduction " << reduce->name() << " at dimension "
<< max_reduce_dim;
std::vector<int64_t> pre_reduce_dims;
pre_reduce_dims.push_back(max_reduce_dim);
std::vector<int64_t> pre_reduce_shape_dims(input_shape.dimensions().begin(),
input_shape.dimensions().end());
pre_reduce_shape_dims.erase(pre_reduce_shape_dims.begin() + max_reduce_dim);
Shape pre_reduce_shape = ShapeUtil::MakeShape(
reduce->shape().element_type(), pre_reduce_shape_dims);
std::unique_ptr<HloInstruction> pre_reduce = HloInstruction::CreateReduce(
pre_reduce_shape, reduce->mutable_operand(0),
reduce->mutable_operand(1), pre_reduce_dims, reduce->to_apply());
pre_reduce->set_metadata(reduce->metadata());
std::vector<int64_t> final_reduce_dims(reduce->dimensions().begin(),
reduce->dimensions().end());
final_reduce_dims.erase(
std::remove(final_reduce_dims.begin(), final_reduce_dims.end(),
max_reduce_dim),
final_reduce_dims.end());
for (int64_t i = 0; i < final_reduce_dims.size(); ++i) {
if (final_reduce_dims[i] > max_reduce_dim) {
final_reduce_dims[i]--;
}
}
std::unique_ptr<HloInstruction> final_reduce = HloInstruction::CreateReduce(
reduce->shape(),
reduce->parent()->AddInstruction(std::move(pre_reduce)),
reduce->mutable_operand(1), final_reduce_dims, reduce->to_apply());
return ReplaceWithNewInstruction(reduce, std::move(final_reduce));
}
private:
bool ignore_small_dims_;
};
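// Worked example (taken from the unit tests): reducing
// f32[1024,16,512,64,128] over dimensions {2,0,4} splits at dimension 0
// (extent 1024, the largest). The rewrite becomes an inner reduce over {0}
// producing f32[16,512,64,128], followed by an outer reduce over the shifted
// dimensions {1,3} producing f32[16,64].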
absl::StatusOr<bool> ReductionSplitter::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
TF_ASSIGN_OR_RETURN(bool changed,
ReductionSplitterVisitor(ignore_small_dims_)
.RunOnModule(module, execution_threads));
return changed;
}
}
} | #include "xla/service/gpu/transforms/reduction_splitter.h"
#include <cstdint>
#include <vector>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class ReductionSplitterTest : public HloTestBase {};
TEST_F(ReductionSplitterTest, SplitReductionAtDimensionTwo) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
add_computation {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry_computation {
param_0 = f16[6,16,512,64]{3,2,1,0} parameter(0)
transpose.1781 = f16[6,512,16,64]{3,1,2,0} transpose(param_0), dimensions={0,2,1,3}
convert.6986 = f32[6,512,16,64]{3,1,2,0} convert(transpose.1781)
bitcast.2136 = f32[6,16,512,64]{3,2,1,0} bitcast(convert.6986)
constant_11111 = f32[] constant(0)
ROOT reduce.982 = f32[16,64]{1,0} reduce(bitcast.2136, constant_11111), dimensions={0,2}, to_apply=add_computation
}
)")
.value();
ASSERT_TRUE(
ReductionSplitter(true).Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root_reduction =
module->entry_computation()->root_instruction();
ASSERT_THAT(root_reduction,
GmockMatch(m::Reduce(m::Reduce(), m::Constant())));
auto* pre_reduction = root_reduction->operand(0);
EXPECT_THAT(pre_reduction->dimensions(), std::vector<int64_t>({2}));
EXPECT_THAT(pre_reduction->shape(), ShapeUtil::MakeShape(F32, {6, 16, 64}));
EXPECT_THAT(root_reduction->dimensions(), std::vector<int64_t>({0}));
EXPECT_THAT(root_reduction->shape(), ShapeUtil::MakeShape(F32, {16, 64}));
}
TEST_F(ReductionSplitterTest, SplitReductionAtDimensionZero) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
add_computation {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry_computation {
param_0 = f32[1024,16,512,64,128]{4,3,2,1,0} parameter(0)
constant_11111 = f32[] constant(0)
ROOT reduce.982 = f32[16,64]{1,0} reduce(param_0, constant_11111), dimensions={2,0,4}, to_apply=add_computation
}
)")
.value();
ASSERT_TRUE(
ReductionSplitter(false).Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root_reduction =
module->entry_computation()->root_instruction();
ASSERT_THAT(root_reduction,
GmockMatch(m::Reduce(m::Reduce(), m::Constant())));
auto* pre_reduction = root_reduction->operand(0);
EXPECT_THAT(pre_reduction->dimensions(), std::vector<int64_t>({0}));
EXPECT_THAT(pre_reduction->shape(),
ShapeUtil::MakeShape(F32, {16, 512, 64, 128}));
EXPECT_THAT(root_reduction->dimensions(), std::vector<int64_t>({1, 3}));
EXPECT_THAT(root_reduction->shape(), ShapeUtil::MakeShape(F32, {16, 64}));
}
TEST_F(ReductionSplitterTest, DontSplitReductionWithSmallDimensions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
add_computation {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry_computation {
param_0 = f32[16,8,1024,8]{3,2,1,0} parameter(0)
constant_11111 = f32[] constant(0)
ROOT reduce.982 = f32[16,1024]{1,0} reduce(param_0, constant_11111), dimensions={3,1}, to_apply=add_computation
}
)")
.value();
EXPECT_FALSE(
ReductionSplitter(true).Run(module.get()).value());
EXPECT_TRUE(
ReductionSplitter(false).Run(module.get()).value());
}
TEST_F(ReductionSplitterTest, DontSplitReductionsWithContiguousDimensions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
add_computation {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry_computation {
param_0 = f32[128,128,64,128]{3,2,1,0} parameter(0)
constant_11111 = f32[] constant(0)
ROOT reduce.982 = f32[128,64]{1,0} reduce(param_0, constant_11111), dimensions={3,0}, to_apply=add_computation
}
)")
.value();
EXPECT_FALSE(
ReductionSplitter(false).Run(module.get()).value());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/reduction_splitter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/reduction_splitter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d759e3dd-49f9-433a-a301-b7317ffa4f80 | cpp | tensorflow/tensorflow | root_instruction_sinker | third_party/xla/xla/service/root_instruction_sinker.cc | third_party/xla/xla/service/root_instruction_sinker_test.cc | #include "xla/service/root_instruction_sinker.h"
#include "xla/service/tuple_util.h"
namespace xla {
namespace {
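// Sinks a tuple-shaped root: duplicates it via get-tuple-elements plus a new
// tuple, appends those instructions to the end of the computation's schedule,
// and makes the new tuple the root.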
void SinkTupleRoot(HloComputation* computation) {
HloInstruction* root = computation->root_instruction();
CHECK(root->shape().IsTuple());
HloInstruction* new_root = TupleUtil::Duplicate(root);
HloInstructionSequence& sequence =
computation->parent()->schedule().GetOrCreateSequence(computation);
for (HloInstruction* operand : new_root->operands()) {
sequence.push_back(operand);
}
sequence.push_back(new_root);
computation->set_root_instruction(new_root);
}
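// Sinks a non-tuple root: appends a bitcast of the root to the end of the
// schedule and makes the bitcast the new root.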
void SinkNontupleRoot(HloComputation* computation) {
HloInstruction* root = computation->root_instruction();
CHECK(!root->shape().IsTuple());
HloInstruction* new_root = computation->AddInstruction(
HloInstruction::CreateBitcast(root->shape(), root));
HloInstructionSequence& sequence =
computation->parent()->schedule().GetOrCreateSequence(computation);
sequence.push_back(new_root);
computation->set_root_instruction(new_root);
}
}
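// For every non-fusion computation of a scheduled module, sinks the root to
// the end of the instruction sequence unless it is already last.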
absl::StatusOr<bool> RootInstructionSinker::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
TF_RET_CHECK(module->has_schedule());
bool modified = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
HloInstructionSequence& sequence =
module->schedule().GetOrCreateSequence(computation);
if (computation->root_instruction() ==
sequence.instructions().at(sequence.size() - 1)) {
continue;
}
if (computation->root_instruction()->shape().IsTuple()) {
SinkTupleRoot(computation);
} else {
SinkNontupleRoot(computation);
}
modified = true;
}
return modified;
}
} | #include "xla/service/root_instruction_sinker.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using RootInstructionSinkerTest = HloTestBase;
TEST_F(RootInstructionSinkerTest, TupleNoChange) {
absl::string_view hlo_string = R"(
HloModule While, is_scheduled=true
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
While.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(100)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY While {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=
While.condition, body=While.body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto while_body =
module->entry_computation()->root_instruction()->while_body();
int num_body_instructions = while_body->instruction_count();
RootInstructionSinker sinker;
EXPECT_FALSE(sinker.Run(module.get()).value());
EXPECT_EQ(module->entry_computation()
->root_instruction()
->while_body()
->instruction_count(),
num_body_instructions);
}
TEST_F(RootInstructionSinkerTest, Tuple) {
absl::string_view hlo_string = R"(
HloModule While, is_scheduled=true
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
after-all = token[] after-all()
send = (s32[3]{0}, u32[], token[]) send(multiply, after-all), channel_id=1
send-done = token[] send-done(send), channel_id=1
}
While.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(100)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY While {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=
While.condition, body=While.body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
RootInstructionSinker sinker;
EXPECT_TRUE(sinker.Run(module.get()).value());
auto while_body =
module->entry_computation()->root_instruction()->while_body();
const auto& sequence = module->schedule().sequence(while_body);
EXPECT_EQ(sequence.instructions().at(sequence.size() - 1),
while_body->root_instruction());
EXPECT_THAT(while_body->root_instruction(),
op::Tuple(op::GetTupleElement(op::Tuple()),
op::GetTupleElement(op::Tuple())));
}
TEST_F(RootInstructionSinkerTest, NontupleNoChange) {
absl::string_view hlo_string = R"(
HloModule Call, is_scheduled=true
Call {
param = s32[3]{0} parameter(0)
ROOT multiply = s32[3]{0} multiply(param, param)
}
ENTRY While {
constant.4 = s32[3]{0} constant({0, 1, 2})
ROOT call = s32[3]{0} call(constant.4), to_apply=Call
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto called_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
int num_instructions = called_computation->instruction_count();
RootInstructionSinker sinker;
EXPECT_FALSE(sinker.Run(module.get()).value());
EXPECT_EQ(module->entry_computation()
->root_instruction()
->called_computations()[0]
->instruction_count(),
num_instructions);
}
TEST_F(RootInstructionSinkerTest, Nontuple) {
absl::string_view hlo_string = R"(
HloModule Call, is_scheduled=true
Call {
param = s32[3]{0} parameter(0)
ROOT multiply = s32[3]{0} multiply(param, param)
after-all = token[] after-all()
send = (s32[3]{0}, u32[], token[]) send(multiply, after-all), channel_id=1
send-done = token[] send-done(send), channel_id=1
}
ENTRY While {
constant.4 = s32[3]{0} constant({0, 1, 2})
ROOT call = s32[3]{0} call(constant.4), to_apply=Call
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
RootInstructionSinker sinker;
EXPECT_TRUE(sinker.Run(module.get()).value());
auto called_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const auto& sequence = module->schedule().sequence(called_computation);
EXPECT_EQ(sequence.instructions().at(sequence.size() - 1),
called_computation->root_instruction());
EXPECT_THAT(called_computation->root_instruction(),
op::Bitcast(op::Multiply()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/root_instruction_sinker.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/root_instruction_sinker_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c68067e9-e2ae-494f-9ab3-807b249c39ba | cpp | tensorflow/tensorflow | node_order | tensorflow/compiler/mlir/tensorflow/translate/node_order.cc | tensorflow/compiler/mlir/tensorflow/translate/node_order_test.cc | #include "tensorflow/compiler/mlir/tensorflow/translate/node_order.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <iterator>
#include <set>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
namespace tensorflow {
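// Emits the nodes of `g` in a topological order that keeps nodes sharing a
// grouping key (per `get_grouping_key`) together where dependencies allow.
// A DFS pass first records a stable position and a group id for every node;
// a Kahn-style pass then drains ready nodes, preferring the current group.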
void TopologicalOrdering(
const Graph& g, const std::function<void(Node*)>& emit,
const std::function<std::string(Node*)>& get_grouping_key) {
std::unordered_map<std::string, int> group_key_string_to_integer;
absl::flat_hash_map<Node*, int> node_to_group;
absl::flat_hash_map<Node*, int> remaining_incoming_nodes;
absl::flat_hash_map<Node*, int> node_to_position;
using Ready = std::vector<Node*>;
std::vector<Ready> group_members_that_are_ready;
std::set<int> groups_that_are_ready;
int i = 0;
DFS(
g, [](Node*) {},
[&](Node* n) {
std::string group_key_string = get_grouping_key(n);
auto entry = group_key_string_to_integer.try_emplace(
group_key_string, group_key_string_to_integer.size());
int group_key = entry.first->second;
node_to_position[n] = i++;
node_to_group[n] = group_key;
if (entry.second) {
group_members_that_are_ready.push_back({});
}
auto in_nodes = n->in_nodes();
int num_incoming = std::distance(in_nodes.begin(), in_nodes.end());
remaining_incoming_nodes[n] = num_incoming;
if (num_incoming == 0) {
group_members_that_are_ready[group_key].push_back(n);
groups_that_are_ready.emplace(group_key);
}
},
[](const Node* n1, const Node* n2) { return n1->name() < n2->name(); });
assert(group_key_string_to_integer.size() ==
group_members_that_are_ready.size());
int num_nodes = remaining_incoming_nodes.size();
int current_group = 0;
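  // Kahn's algorithm: emit one ready node per iteration, staying on the
  // current group while it has ready members and switching to another ready
  // group only when it drains.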
for (int i = 0; i < num_nodes; i++) {
if (groups_that_are_ready.find(current_group) ==
groups_that_are_ready.end()) {
current_group = *groups_that_are_ready.begin();
}
int size = group_members_that_are_ready[current_group].size();
assert(size);
Node* node = group_members_that_are_ready[current_group][--size];
group_members_that_are_ready[current_group].pop_back();
if (size == 0) {
groups_that_are_ready.erase(current_group);
}
emit(node);
auto out_nodes = node->out_nodes();
std::vector<Node*> nodes_sorted(out_nodes.begin(), out_nodes.end());
std::sort(nodes_sorted.begin(), nodes_sorted.end(), [&](Node* a, Node* b) {
return node_to_position[a] < node_to_position[b];
});
for (Node* out : nodes_sorted) {
remaining_incoming_nodes[out]--;
if (remaining_incoming_nodes[out] == 0) {
int group_key = node_to_group[out];
if (group_members_that_are_ready[group_key].empty()) {
groups_that_are_ready.emplace(group_key);
}
group_members_that_are_ready[group_key].push_back(out);
}
}
}
}
} | #include "tensorflow/compiler/mlir/tensorflow/translate/node_order.h"
#include <string>
#include <utility>
#include <vector>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
REGISTER_OP("TestParams").Output("o: float");
REGISTER_OP("TestInput").Output("a: float").Output("b: float");
REGISTER_OP("TestMul").Input("a: float").Input("b: float").Output("o: float");
REGISTER_OP("TestUnary").Input("a: float").Output("o: float");
REGISTER_OP("TestTwoOutputs").Output("a: float").Output("b: float");
REGISTER_OP("TestBinary")
.Input("a: float")
.Input("b: float")
.Output("o: float");
bool ExpectBefore(const std::vector<std::pair<string, string>>& ordered_pairs,
const std::vector<Node*>& inputs, string* error) {
for (const std::pair<string, string>& pair : ordered_pairs) {
const string& before_node = pair.first;
const string& after_node = pair.second;
bool seen_before = false;
bool seen_both = false;
for (const Node* node : inputs) {
if (!seen_before && after_node == node->name()) {
*error = std::string("Saw ") + after_node + std::string(" before ") +
before_node;
return false;
}
if (before_node == node->name()) {
seen_before = true;
} else if (after_node == node->name()) {
seen_both = seen_before;
break;
}
}
if (!seen_both) {
*error = std::string("didn't see either ") + before_node +
std::string(" or ") + after_node;
return false;
}
}
return true;
}
TEST(AlgorithmTest, TopologicalOrdering) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
using namespace ::tensorflow::ops;
Node* n1 = SourceOp("TestParams", b.opts().WithName("n1"));
Node* n2 =
SourceOp("TestParams", b.opts().WithName("n2").WithControlInput(n1));
Node* n3 =
SourceOp("TestParams", b.opts().WithName("n3").WithControlInput(n2));
Node* n4 = BinaryOp("TestMul", n1, {n3, 0}, b.opts().WithName("n4"));
Node* n5 = BinaryOp("TestMul", n1, {n3, 0},
b.opts().WithName("n5").WithControlInput(n1));
Node* n6 = BinaryOp("TestMul", n2, {n3, 0}, b.opts().WithName("n6"));
n3->set_requested_device("a");
n4->set_requested_device("a");
n5->set_requested_device("b");
n6->set_requested_device("b");
Graph g(OpRegistry::Global());
TF_ASSERT_OK(GraphDefBuilderToGraph(b, &g));
std::vector<Node*> order;
TopologicalOrdering(g, [&](Node* n) { order.push_back(n); }, GroupByDevice());
std::vector<std::pair<string, string>> desired_order = {
{"n1", "n2"},
{"n2", "n3"},
{"n3", "n4"},
{"n1", "n4"},
{"n1", "n5"},
{"n2", "n6"},
{"n3", "n4"},
{"n3", "n5"},
{"n3", "n6"},
};
string error;
EXPECT_TRUE(ExpectBefore(desired_order, order, &error)) << error;
}
TEST(AlgorithmTest, TopologicalOrderingOnShallowTree) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
using namespace ::tensorflow::ops;
Node* n1 = SourceOp("TestParams", b.opts().WithName("n1").WithDevice("a"));
Node* n2 =
SourceOp("TestParams",
b.opts().WithName("n2").WithDevice("b").WithControlInput(n1));
Node* n3 =
SourceOp("TestParams",
b.opts().WithName("n3").WithDevice("c").WithControlInput(n2));
Node* n4 =
SourceOp("TestParams",
b.opts().WithName("n4").WithDevice("a").WithControlInput(n1));
Node* n5 =
SourceOp("TestParams",
b.opts().WithName("n5").WithDevice("b").WithControlInput(n2));
Node* n6 =
SourceOp("TestParams",
b.opts().WithName("n6").WithDevice("c").WithControlInput(n3));
Node* n7 =
SourceOp("TestParams",
b.opts().WithName("n7").WithDevice("a").WithControlInput(n4));
Node* n8 =
SourceOp("TestParams",
b.opts().WithName("n8").WithDevice("b").WithControlInput(n5));
Node* n9 =
SourceOp("TestParams",
b.opts().WithName("n9").WithDevice("c").WithControlInput(n6));
Graph g(OpRegistry::Global());
TF_ASSERT_OK(GraphDefBuilderToGraph(b, &g));
std::vector<Node*> order;
TopologicalOrdering(g, [&](Node* n) { order.push_back(n); }, GroupByDevice());
std::vector<Node*> desired_order = {
g.source_node(), n1, n4, n7, n2, n5, n8, n3, n6, n9, g.sink_node()};
for (int i = 0; i < desired_order.size(); i++) {
desired_order[i] = g.FindNodeId(desired_order[i]->id());
}
EXPECT_EQ(order, desired_order);
}
TEST(AlgorithmTest, TopologicalOrderingGivesTheSameResultIfCalledTwice) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
using namespace ::tensorflow::ops;
SourceOp("TestParams", b.opts().WithName("n1"));
SourceOp("TestParams", b.opts().WithName("n2"));
SourceOp("TestParams", b.opts().WithName("n3"));
SourceOp("TestParams", b.opts().WithName("n4"));
SourceOp("TestParams", b.opts().WithName("n5"));
SourceOp("TestParams", b.opts().WithName("n6"));
SourceOp("TestParams", b.opts().WithName("n7"));
SourceOp("TestParams", b.opts().WithName("n8"));
SourceOp("TestParams", b.opts().WithName("n9"));
Graph g(OpRegistry::Global());
TF_ASSERT_OK(GraphDefBuilderToGraph(b, &g));
std::vector<Node*> order1;
std::vector<Node*> order2;
TopologicalOrdering(
g, [&](Node* n) { order1.push_back(n); },
[&](const Node* node) { return std::string("same"); });
TopologicalOrdering(
g, [&](Node* n) { order2.push_back(n); },
[&](const Node* node) { return std::string("same"); });
EXPECT_EQ(order1, order2);
}
TEST(AlgorithmTest, TopologicalOrderingOnChain) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
using namespace ::tensorflow::ops;
Node* n1 = SourceOp("TestParams", b.opts().WithName("n1"));
Node* n2 = UnaryOp("TestUnary", n1, b.opts().WithName("n2"));
Node* n3 = UnaryOp("TestUnary", n2, b.opts().WithName("n3"));
Node* n4 = UnaryOp("TestUnary", n3, b.opts().WithName("n4"));
Node* n5 = UnaryOp("TestUnary", n4, b.opts().WithName("n5"));
Node* n6 = UnaryOp("TestUnary", n5, b.opts().WithName("n6"));
Graph g(OpRegistry::Global());
TF_ASSERT_OK(GraphDefBuilderToGraph(b, &g));
std::vector<Node*> order;
TopologicalOrdering(g, [&](Node* n) { order.push_back(n); }, GroupByDevice());
std::vector<Node*> desired_order = {g.source_node(), n1, n2, n3, n4, n5, n6,
g.sink_node()};
for (int i = 0; i < desired_order.size(); i++) {
desired_order[i] = g.FindNodeId(desired_order[i]->id());
}
EXPECT_EQ(order, desired_order);
}
TEST(AlgorithmTest, TopologicalOrderingOnMultipleOutputs) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
using namespace ::tensorflow::ops;
Node* n1 = SourceOp("TestTwoOutputs", b.opts().WithName("n1"));
UnaryOp("TestUnary", {n1, 0}, b.opts().WithName("n2"));
UnaryOp("TestUnary", {n1, 1}, b.opts().WithName("n3"));
UnaryOp("TestUnary", {n1, 0}, b.opts().WithName("n4"));
UnaryOp("TestUnary", {n1, 1}, b.opts().WithName("n5"));
Graph g(OpRegistry::Global());
TF_ASSERT_OK(GraphDefBuilderToGraph(b, &g));
std::vector<Node*> order;
TopologicalOrdering(g, [&](Node* n) { order.push_back(n); }, GroupByDevice());
std::vector<std::pair<string, string>> desired_order = {
{"n1", "n2"},
{"n1", "n3"},
{"n1", "n4"},
{"n1", "n5"},
};
string error;
EXPECT_TRUE(ExpectBefore(desired_order, order, &error)) << error;
}
TEST(AlgorithmTest, TopologicalOrderingSameAsReversePostOrder) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
using namespace ::tensorflow::ops;
Node* n = SourceOp("TestTwoOutputs", b.opts().WithName("n"));
Node* n0 = UnaryOp("TestUnary", {n, 0}, b.opts().WithName("n2"));
Node* n1 = UnaryOp("TestUnary", {n, 1}, b.opts().WithName("n1"));
UnaryOp("TestUnary", n0, b.opts().WithName("n1a"));
UnaryOp("TestUnary", n0, b.opts().WithName("n8a"));
UnaryOp("TestUnary", n0, b.opts().WithName("n2a"));
UnaryOp("TestUnary", n0, b.opts().WithName("n7a"));
UnaryOp("TestUnary", n1, b.opts().WithName("n1b"));
UnaryOp("TestUnary", n1, b.opts().WithName("n8b"));
UnaryOp("TestUnary", n1, b.opts().WithName("n2b"));
UnaryOp("TestUnary", n1, b.opts().WithName("n7b"));
UnaryOp("TestUnary", n0, b.opts().WithName("n3a"));
UnaryOp("TestUnary", n0, b.opts().WithName("n6a"));
UnaryOp("TestUnary", n0, b.opts().WithName("n4a"));
UnaryOp("TestUnary", n0, b.opts().WithName("n5a"));
UnaryOp("TestUnary", n1, b.opts().WithName("n3b"));
UnaryOp("TestUnary", n1, b.opts().WithName("n6b"));
UnaryOp("TestUnary", n1, b.opts().WithName("n4b"));
UnaryOp("TestUnary", n1, b.opts().WithName("n5b"));
Graph g(OpRegistry::Global());
TF_ASSERT_OK(GraphDefBuilderToGraph(b, &g));
std::vector<Node*> order;
TopologicalOrdering(g, [&](Node* n) { order.push_back(n); }, GroupByDevice());
std::vector<Node*> desired_order;
GetReversePostOrder(g, &desired_order, [](const Node* n1, const Node* n2) {
return n1->name() < n2->name();
});
EXPECT_EQ(desired_order, order);
}
TEST(AlgorithmTest, TopologicalOrderingWithEachDeviceUsedOnce) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
using namespace ::tensorflow::ops;
SourceOp("TestParams", b.opts().WithName("n1").WithDevice("a"));
SourceOp("TestParams", b.opts().WithName("n2").WithDevice("b"));
SourceOp("TestParams", b.opts().WithName("n3").WithDevice("c"));
SourceOp("TestParams", b.opts().WithName("n4").WithDevice("d"));
Graph g(OpRegistry::Global());
TF_ASSERT_OK(GraphDefBuilderToGraph(b, &g));
int count = 0;
TopologicalOrdering(g, [&](Node* n) { count++; }, GroupByDevice());
EXPECT_EQ(count, 6);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/translate/node_order.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/translate/node_order_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6fbcfd3c-c07b-407f-91b7-5614abd60af0 | cpp | tensorflow/tensorflow | execute | tensorflow/core/tfrt/mlrt/interpreter/execute.cc | tensorflow/core/common_runtime/eager/execute_test.cc | #include "tensorflow/core/tfrt/mlrt/interpreter/execute.h"
#include <cstdint>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/kernel.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/register_span.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/value.h"
#include "tsl/profiler/lib/traceme.h"
namespace mlrt {
namespace {
struct CurrentExecutionInfo {
ExecutionContext* current_context = nullptr;
ExecutionContext* ready_context = nullptr;
};
CurrentExecutionInfo& GetCurrentExecutionInfo() {
static thread_local CurrentExecutionInfo current_execution_info;
return current_execution_info;
}
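// Resumes a suspended execution context. If the resumption happens while the
// current context is returning from its outermost function frame, the ready
// context is handed over inline and picked up by the enclosing Execute()
// loop; otherwise it is re-dispatched on the context's work queue.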
void Resume(ExecutionContext& ready_context) {
auto& current_execution_info = GetCurrentExecutionInfo();
auto* current_context = current_execution_info.current_context;
if ((current_context != nullptr) &&
(current_execution_info.ready_context == nullptr) &&
(current_context->state() == ExecutionContext::State::kReturn) &&
(current_context->function_stack_size() == 1)) {
current_execution_info.ready_context = &ready_context;
} else {
auto* work_queue = ready_context.work_queue();
DCHECK(work_queue);
work_queue->AddTask([&ready_context]() { Execute(ready_context); });
}
}
}
namespace execute_internal {
void UnwindOnError(ExecutionContext& context, int64_t pc);
}
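// Interpreter main loop: runs the kernels of the top function frame until
// the context leaves the kRunning state, then dispatches on the new state
// (ready, return, suspended, or error). Ready contexts are chained through
// the thread-local CurrentExecutionInfo to avoid extra work-queue hops.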
void Execute(ExecutionContext& ctx) {
auto& current_execution_info = GetCurrentExecutionInfo();
current_execution_info.ready_context = &ctx;
for (; current_execution_info.ready_context;) {
current_execution_info.current_context =
current_execution_info.ready_context;
current_execution_info.ready_context = nullptr;
auto& context = *current_execution_info.current_context;
DCHECK(!context.function_stack_.empty());
int function_stack_index = context.function_stack_.size() - 1;
FunctionContext* current_function = &context.function_stack_.back();
int64_t pc = current_function->pc_;
auto kernels = context.loaded_executable().kernels();
auto kernel_object_iter =
current_function->function_object().kernels().begin();
kernel_object_iter += pc;
KernelFrame::State kstate(current_function);
KernelFrame frame(&kstate);
for (; context.state_ == ExecutionContext::State::kRunning; ++pc) {
DCHECK(kernel_object_iter <
current_function->function_object().kernels().end());
bc::Kernel kernel_object = *kernel_object_iter;
frame.set_kernel(kernel_object);
kernels[kernel_object.code()](frame);
++kernel_object_iter;
}
current_function = &context.function_stack_[function_stack_index];
current_function->pc_ = pc;
current_execution_info.current_context = nullptr;
switch (context.state_) {
case ExecutionContext::State::kReady: {
DCHECK(current_execution_info.ready_context == nullptr);
context.state_ = ExecutionContext::State::kRunning;
if (current_function->kernel_context().reenter) {
current_function->pc_--;
}
current_execution_info.ready_context = &context;
break;
}
case ExecutionContext::State::kRunning:
LOG(FATAL) << "This cannot happen.";
break;
case ExecutionContext::State::kReturn: {
tsl::profiler::TraceMe trace_me("Execute::Return");
context.function_stack_.pop_back();
if (context.function_stack_.empty()) {
if (context.exit_handler_) {
std::move(context.exit_handler_)();
}
break;
}
DCHECK(current_execution_info.ready_context == nullptr);
context.state_ = ExecutionContext::State::kRunning;
current_execution_info.ready_context = &context;
break;
}
case ExecutionContext::State::kSuspended: {
DCHECK(current_execution_info.ready_context == nullptr);
tsl::profiler::TraceMe trace_me("Execute::Suspend");
DCHECK(context.suspend_handler_);
std::move(context.suspend_handler_)([&context]() { Resume(context); });
return;
}
case ExecutionContext::State::kError: {
DCHECK(current_execution_info.ready_context == nullptr);
tsl::profiler::TraceMe trace_me("Execute::Error");
execute_internal::UnwindOnError(context, -1);
return;
}
}
}
}
namespace execute_internal {
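// Unwinds the function stack after an error: invokes HandleError() on the
// registers defined so far (function inputs and kernel results), and if a
// register suspends the context along the way, re-queues the remaining
// unwinding work on the context's work queue.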
void UnwindOnError(ExecutionContext& context, int64_t pc) {
std::string function_name;
if (!context.function_stack_.empty()) {
function_name = context.function_stack_.back().function_object().name();
}
context.LogError(context.status());
context.LogError(absl::InternalError(absl::StrCat(
"UnwindOnError: start from function ", function_name,
" with stack size: ", context.function_stack_.size(), " at pc: ", pc,
" for context ", absl::Hex(reinterpret_cast<std::uintptr_t>(&context)),
" at state ", context.state_)));
while (!context.function_stack_.empty()) {
DCHECK(context.state_ == ExecutionContext::State::kError);
FunctionContext* current_function = &context.function_stack_.back();
Value context_value(&context);
if (pc == -1) {
DCHECK(context.state_ == ExecutionContext::State::kError);
++pc;
RegisterSpan input_reg_span(
current_function->function_object().input_regs(),
current_function->regs());
for (Value& reg : input_reg_span) {
reg.HandleError(context_value);
if (context.state_ != ExecutionContext::State::kError) {
DCHECK(context.state_ == ExecutionContext::State::kSuspended);
context.LogError(absl::InternalError(absl::StrCat(
"UnwindOnError: entering state", context.state_, " for context ",
absl::Hex(reinterpret_cast<std::uintptr_t>(&context)))));
--pc;
break;
}
}
}
context.LogError(absl::InternalError(
absl::StrCat("UnwindOnError: unwinding function from ", pc, " to ",
current_function->pc_, " for context ",
absl::Hex(reinterpret_cast<std::uintptr_t>(&context)),
" at state ", context.state_)));
for (; context.state_ == ExecutionContext::State::kError &&
pc <= current_function->pc_;
++pc) {
bc::Kernel kernel = current_function->function_object().kernels()[pc];
RegisterSpan reg_span(kernel.results(), current_function->regs());
for (Value& reg : reg_span) {
reg.HandleError(context_value);
if (context.state_ != ExecutionContext::State::kError) {
DCHECK(context.state_ == ExecutionContext::State::kSuspended);
context.LogError(absl::InternalError(absl::StrCat(
"UnwindOnError: entering state", context.state_, " for context ",
absl::Hex(reinterpret_cast<std::uintptr_t>(&context)))));
--pc;
break;
}
}
}
if (context.state_ == ExecutionContext::State::kSuspended) {
DCHECK(context.suspend_handler_)
<< "suspend_handler_ must be populated when the state is set to "
"kSuspended.";
context.LogError(absl::InternalError(absl::StrCat(
"UnwindOnError: suspended state ", context.state_, " for context ",
absl::Hex(reinterpret_cast<std::uintptr_t>(&context)))));
std::move(context.suspend_handler_)([&context, pc]() {
auto* work_queue = context.work_queue();
DCHECK(work_queue);
work_queue->AddTask([&context, pc]() {
context.state_ = ExecutionContext::State::kError;
UnwindOnError(context, pc);
});
});
return;
}
DCHECK(context.state_ != ExecutionContext::State::kSuspended);
pc = -1;
context.function_stack_.pop_back();
}
context.LogError(absl::InternalError(absl::StrCat(
"UnwindOnError: done for function ", function_name,
" for context: ", absl::Hex(reinterpret_cast<std::uintptr_t>(&context)),
" at state ", context.state_)));
if (context.exit_handler_) {
std::move(context.exit_handler_)();
}
}
}
} | #include "tensorflow/core/common_runtime/eager/execute.h"
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
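// End-to-end tests for EagerExecute: running a primitive op as a function,
// invoking (optionally XLA-compiled, possibly nested) function defs, and
// rejecting malformed full type information on int32 inputs.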
TEST(ExecuteTest, EagerOperationAsFunction) {
StaticDeviceMgr device_mgr(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_EXPLICIT,
false, &device_mgr, false, nullptr, nullptr);
ctx->SetRunEagerOpAsFunction(true);
auto op = std::make_unique<EagerOperation>(ctx);
TF_ASSERT_OK(op->Reset(
"Mul",
"/job:localhost/replica:0/task:0/device:CPU:0"));
Tensor input1_tensor = test::AsScalar<int64_t>(3);
auto input1 = core::RefCountPtr<ImmediateExecutionTensorHandle>(
ctx->CreateLocalHandleFromTFTensor(input1_tensor,
ctx->HostCPUName().c_str()));
TF_ASSERT_OK(op->AddInput(input1.get()));
Tensor input2_tensor = test::AsScalar<int64_t>(2);
auto input2 = core::RefCountPtr<ImmediateExecutionTensorHandle>(
ctx->CreateLocalHandleFromTFTensor(input2_tensor,
ctx->HostCPUName().c_str()));
TF_ASSERT_OK(op->AddInput(input2.get()));
std::vector<TensorHandle*> retvals(1);
int num_retvals = retvals.size();
TF_ASSERT_OK(EagerExecute(op.get(), retvals.data(), &num_retvals));
retvals[0]->Unref();
retvals[0] = nullptr;
ctx->Unref();
}
TEST(ExecuteTest, SimpleFunction) {
StaticDeviceMgr device_mgr(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_EXPLICIT,
false, &device_mgr, false, nullptr, nullptr);
const Tensor kTwo = test::AsScalar<int64_t>(2);
const string function_name = "XTimesTwo";
const FunctionDef x_times_two = FunctionDefHelper::Define(
function_name,
{"x: int64"},
{"y: int64"},
{},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_INT64}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_INT64}}},
});
TF_ASSERT_OK(ctx->AddFunctionDef(x_times_two));
auto op = std::make_unique<EagerOperation>(ctx);
TF_ASSERT_OK(op->Reset(
function_name.c_str(),
"/job:localhost/replica:0/task:0/device:CPU:0"));
Tensor input_tensor = test::AsScalar<int64_t>(3);
auto input = core::RefCountPtr<ImmediateExecutionTensorHandle>(
ctx->CreateLocalHandleFromTFTensor(input_tensor,
ctx->HostCPUName().c_str()));
TF_ASSERT_OK(op->AddInput(input.get()));
monitoring::testing::CellReader<int64_t> counter_reader(
"/tensorflow/core/tf_function_compile");
std::vector<TensorHandle*> retvals(1);
int num_retvals = retvals.size();
TF_ASSERT_OK(EagerExecute(op.get(), retvals.data(), &num_retvals));
EXPECT_EQ(counter_reader.Delta("CPU", "disabled"), 1);
retvals[0]->Unref();
retvals[0] = nullptr;
ctx->Unref();
}
TEST(ExecuteTest, SimpleFunctionInt32BadFullType) {
StaticDeviceMgr device_mgr(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_EXPLICIT,
false, &device_mgr, false, nullptr,
nullptr, nullptr,
true);
const Tensor kTwo = test::AsScalar<int32_t>(2);
const string function_name = "XTimesTwo";
const FunctionDef x_times_two = FunctionDefHelper::Define(
function_name,
{"x: int32"},
{"y: int32"},
{},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT32}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT32}, {"DstT", DT_INT32}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_INT32}}},
});
TF_ASSERT_OK(ctx->AddFunctionDef(x_times_two));
auto op = std::make_unique<EagerOperation>(ctx);
TF_ASSERT_OK(op->Reset(
function_name.c_str(),
"/job:localhost/replica:0/task:0/device:CPU:0"));
Tensor input_tensor = test::AsScalar<int32_t>(3);
ASSERT_NE(ctx->HostCPUName().c_str(), nullptr);
Device* d = nullptr;
TF_ASSERT_OK(ctx->FindDeviceFromName(ctx->HostCPUName().c_str(), &d));
auto input = core::RefCountPtr<TensorHandle>(
TensorHandle::CreateLocalHandle(std::move(input_tensor), d,
nullptr, ctx));
TF_ASSERT_OK(op->AddInput(input.get()));
FullTypeDef ft;
ft.set_type_id(TFT_TENSOR);
input.get()->SetFullType(ft);
std::vector<TensorHandle*> retvals(1);
int num_retvals = retvals.size();
Status status = EagerExecute(op.get(), retvals.data(), &num_retvals);
ASSERT_TRUE(errors::IsInvalidArgument(status)) << "Actual status: " << status;
EXPECT_TRUE(
absl::StrContains(status.message(), "TFT_TENSOR has 0 args instead of 1"))
<< "Actual: " << status.message();
ASSERT_EQ(retvals[0], nullptr);
ctx->Unref();
}
TEST(ExecuteTest, CompiledFunction) {
StaticDeviceMgr device_mgr(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_EXPLICIT,
false, &device_mgr, false, nullptr, nullptr);
const Tensor kTwo = test::AsScalar<int64_t>(2);
const string function_name = "XTimesTwo";
const FunctionDef x_times_two = FunctionDefHelper::Define(
function_name,
{"x: int64"},
{"y: int64"},
{},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_INT64}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_INT64}}},
});
TF_ASSERT_OK(ctx->AddFunctionDef(x_times_two));
auto op = std::make_unique<EagerOperation>(ctx);
TF_ASSERT_OK(op->Reset(
function_name.c_str(),
"/job:localhost/replica:0/task:0/device:CPU:0"));
TF_ASSERT_OK(op->SetAttrBool("_XlaMustCompile", true));
Tensor input_tensor = test::AsScalar<int64_t>(3);
auto input = core::RefCountPtr<ImmediateExecutionTensorHandle>(
ctx->CreateLocalHandleFromTFTensor(input_tensor,
ctx->HostCPUName().c_str()));
TF_ASSERT_OK(op->AddInput(input.get()));
monitoring::testing::CellReader<int64_t> counter_reader(
"/tensorflow/core/tf_function_compile");
monitoring::testing::CellReader<int64_t> top_level_counter(
"/tensorflow/core/tf_top_level_jit_compilation");
std::vector<TensorHandle*> retvals(1);
int num_retvals = retvals.size();
TF_ASSERT_OK(EagerExecute(op.get(), retvals.data(), &num_retvals));
EXPECT_EQ(counter_reader.Delta("CPU", "enabled"), 1);
EXPECT_EQ(top_level_counter.Delta("CPU"), 1);
retvals[0]->Unref();
retvals[0] = nullptr;
ctx->Unref();
}
TEST(ExecuteTest, NestedCompiledFunction) {
StaticDeviceMgr device_mgr(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_EXPLICIT,
false, &device_mgr, false, nullptr, nullptr);
const Tensor kTwo = test::AsScalar<int64_t>(2);
const string function_name = "XTimesTwo";
const FunctionDef x_times_two = FunctionDefHelper::Define(
function_name,
{"x: int64"},
{"y: int64"},
{},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_INT64}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_INT64}}},
});
TF_ASSERT_OK(ctx->AddFunctionDef(x_times_two));
const string call_function_name = "FunctionCall";
const FunctionDef function_call = FunctionDefHelper::Define(
call_function_name,
{"x: int64"},
{"y: int64"},
{},
{
{{"y"},
"StatefulPartitionedCall",
{"x"},
{{"_XlaMustCompile", true},
{"Tin", DataTypeSlice({DT_INT64})},
{"Tout", DataTypeSlice({DT_INT64})},
{"f", tensorflow::FunctionDefHelper::FunctionRef(
"XTimesTwo", {{"T", DT_INT64}})}}},
});
TF_ASSERT_OK(ctx->AddFunctionDef(function_call));
auto op = std::make_unique<EagerOperation>(ctx);
TF_ASSERT_OK(op->Reset(
call_function_name.c_str(),
"/job:localhost/replica:0/task:0/device:CPU:0"));
Tensor input_tensor = test::AsScalar<int64_t>(3);
auto input = core::RefCountPtr<ImmediateExecutionTensorHandle>(
ctx->CreateLocalHandleFromTFTensor(input_tensor,
ctx->HostCPUName().c_str()));
TF_ASSERT_OK(op->AddInput(input.get()));
monitoring::testing::CellReader<int64_t> counter_reader(
"/tensorflow/core/tf_function_compile");
monitoring::testing::CellReader<int64_t> top_level_counter(
"/tensorflow/core/tf_top_level_jit_compilation");
std::vector<TensorHandle*> retvals(1);
int num_retvals = retvals.size();
TF_ASSERT_OK(EagerExecute(op.get(), retvals.data(), &num_retvals));
EXPECT_EQ(counter_reader.Delta("CPU", "enabled"), 1);
EXPECT_EQ(counter_reader.Delta("CPU", "disabled"), 0);
EXPECT_EQ(top_level_counter.Delta("CPU"), 0);
retvals[0]->Unref();
retvals[0] = nullptr;
ctx->Unref();
}
TEST(ExecuteTest, MultipleNestedCompiledFunction) {
StaticDeviceMgr device_mgr(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_EXPLICIT,
false, &device_mgr, false, nullptr, nullptr);
const Tensor kTwo = test::AsScalar<int64_t>(2);
const string function_name = "XTimesTwo";
const FunctionDef x_times_two = FunctionDefHelper::Define(
function_name,
{"x: int64"},
{"y: int64"},
{},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_INT64}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_INT64}}},
});
TF_ASSERT_OK(ctx->AddFunctionDef(x_times_two));
const string call_function_name = "FunctionCall";
FunctionDef function_call = FunctionDefHelper::Define(
call_function_name,
{"x: int64"},
{"y: int64"},
{},
{
{{"y"},
"StatefulPartitionedCall",
{"x"},
{{"_XlaMustCompile", true},
{"_device", "/job:localhost/replica:0/task:0/device:CPU:0"},
{"Tin", DataTypeSlice({DT_INT64})},
{"Tout", DataTypeSlice({DT_INT64})},
{"f", tensorflow::FunctionDefHelper::FunctionRef(
"XTimesTwo", {{"T", DT_INT64}})}}},
});
for (auto& node_def : *function_call.mutable_node_def()) {
if (node_def.op() == "StatefulPartitionedCall") {
node_def.set_device("/job:localhost/replica:0/task:0/device:CPU:0");
}
}
TF_ASSERT_OK(ctx->AddFunctionDef(function_call));
const string call_function_name2 = "FunctionCall2";
const FunctionDef function_call2 = FunctionDefHelper::Define(
call_function_name2,
{"x: int64"},
{"y: int64"},
{},
{
{{"y"},
"StatefulPartitionedCall",
{"x"},
{{"Tin", DataTypeSlice({DT_INT64})},
{"Tout", DataTypeSlice({DT_INT64})},
{"f", tensorflow::FunctionDefHelper::FunctionRef(
"FunctionCall", {{"T", DT_INT64}})}}},
});
TF_ASSERT_OK(ctx->AddFunctionDef(function_call2));
auto op = std::make_unique<EagerOperation>(ctx);
TF_ASSERT_OK(op->Reset(
call_function_name2.c_str(),
"/job:localhost/replica:0/task:0/device:CPU:0"));
Tensor input_tensor = test::AsScalar<int64_t>(3);
auto input = core::RefCountPtr<ImmediateExecutionTensorHandle>(
ctx->CreateLocalHandleFromTFTensor(input_tensor,
ctx->HostCPUName().c_str()));
TF_ASSERT_OK(op->AddInput(input.get()));
monitoring::testing::CellReader<int64_t> counter_reader(
"/tensorflow/core/tf_function_compile");
monitoring::testing::CellReader<int64_t> top_level_counter(
"/tensorflow/core/tf_top_level_jit_compilation");
std::vector<TensorHandle*> retvals(1);
int num_retvals = retvals.size();
TF_ASSERT_OK(EagerExecute(op.get(), retvals.data(), &num_retvals));
EXPECT_EQ(counter_reader.Delta("CPU", "enabled"), 1);
EXPECT_EQ(counter_reader.Delta("CPU", "disabled"), 0);
EXPECT_EQ(top_level_counter.Delta("CPU"), 0);
retvals[0]->Unref();
retvals[0] = nullptr;
ctx->Unref();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/interpreter/execute.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/execute_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e640441a-92c9-460e-bb15-7f5030fe035e | cpp | tensorflow/tensorflow | triton | third_party/xla/xla/service/gpu/fusions/triton.cc | third_party/xla/xla/service/gpu/fusions/triton_test.cc | #include "xla/service/gpu/fusions/triton.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/fusions/triton/triton_fusion_emitter.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/kernel_arguments.h"
#include "xla/service/gpu/kernel_reuse_cache.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/gpu/runtime/kernel_thunk.h"
#include "xla/service/gpu/triton_fusion_analysis.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
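// Lowers `fusion` to a Triton kernel. __triton fusions require a block-level
// launch config; __triton_gemm fusions take their block-level parameters
// from the TritonGemmConfig in the backend config, falling back to a
// conservative default (1 CTA, 1 stage, 2 warps) when none is present.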
absl::StatusOr<TritonWrapperResult>
TritonFusion::GenerateTritonKernelAndWrapper(
const HloFusionInstruction& fusion, absl::string_view impl_fn_name,
const se::DeviceDescription& device_info, llvm::Module* llvm_module,
mlir::MLIRContext* mlir_context) const {
const se::GpuComputeCapability& cc = device_info.gpu_compute_capability();
auto backend_config =
fusion.backend_config<GpuBackendConfig>()->fusion_backend_config();
absl::string_view fusion_kind = backend_config.kind();
TritonWrapperResult triton_wrapper_result;
if (fusion_kind == kTritonFusionKind) {
std::optional<LaunchConfig> launch_config = this->launch_config();
if (!launch_config.has_value()) {
return absl::InvalidArgumentError(absl::StrCat(
"Block level fusion config is required for Triton fusions: ",
fusion.ToString()));
}
TF_ASSIGN_OR_RETURN(triton_wrapper_result,
TritonWrapper(impl_fn_name, &fusion, cc, device_info,
launch_config->block_level_parameters,
llvm_module, *mlir_context));
} else {
CHECK_EQ(fusion_kind, kTritonGemmFusionKind);
BlockLevelParameters block_level_parameters;
if (!backend_config.has_triton_gemm_config()) {
block_level_parameters.num_ctas = 1;
block_level_parameters.num_stages = 1;
block_level_parameters.num_warps = 2;
} else {
const auto& triton_config = backend_config.triton_gemm_config();
block_level_parameters.num_ctas = triton_config.num_ctas();
block_level_parameters.num_stages = triton_config.num_stages();
block_level_parameters.num_warps = triton_config.num_warps();
}
TF_ASSIGN_OR_RETURN(
triton_wrapper_result,
TritonWrapper(impl_fn_name, &fusion, cc, device_info,
block_level_parameters, llvm_module, *mlir_context));
}
return triton_wrapper_result;
}
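// Emits the fusion as a KernelThunk: generates the Triton kernel (or reuses
// a cached one), builds a host-visible kernel prototype matching the buffer
// arguments, and splices the Triton-generated function into that prototype.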
absl::StatusOr<FusionEmissionResult> TritonFusion::Emit(
IrEmitterContext& ir_emitter_context,
const HloFusionInstruction& fusion) const {
llvm::IRBuilder builder(ir_emitter_context.llvm_module()->getContext());
VLOG(3) << fusion.ToString();
std::string suggested_kernel_name = std::string(fusion.name());
TF_ASSIGN_OR_RETURN(
auto kernel_arguments,
KernelArguments::Create(ir_emitter_context.buffer_assignment(), &fusion));
const HloComputation* hlo_computation =
fusion.fused_instructions_computation();
auto generate = [&]() -> absl::StatusOr<KernelReuseCache::Entry> {
VLOG(3) << "Generating: " << suggested_kernel_name;
const std::string impl_fn_name =
ir_emitter_context.name_uniquer()->GetUniqueName(
llvm_ir::SanitizeFunctionName(
absl::StrCat(suggested_kernel_name, "_impl")));
TF_ASSIGN_OR_RETURN(
TritonWrapperResult triton_wrapper_result,
GenerateTritonKernelAndWrapper(fusion, impl_fn_name,
ir_emitter_context.gpu_device_info(),
ir_emitter_context.llvm_module(),
ir_emitter_context.mlir_context()));
auto backend_config =
fusion.backend_config<GpuBackendConfig>()->fusion_backend_config();
absl::string_view fusion_kind = backend_config.kind();
LaunchDimensions launch_dimensions;
if (fusion_kind == kTritonFusionKind) {
std::optional<LaunchConfig> launch_config = this->launch_config();
CHECK(launch_config.has_value());
launch_dimensions = std::move(launch_config->launch_dimensions);
} else {
CHECK_EQ(fusion_kind, kTritonGemmFusionKind);
BlockLevelParameters block_level_parameters;
if (!backend_config.has_triton_gemm_config()) {
LOG(WARNING) << "Using fallback triton GEMM config for op "
<< fusion.name();
auto& triton_config = *backend_config.mutable_triton_gemm_config();
triton_config.set_block_m(64);
triton_config.set_block_k(64);
triton_config.set_block_n(64);
triton_config.set_split_k(1);
triton_config.set_num_stages(1);
triton_config.set_num_warps(2);
triton_config.set_num_ctas(1);
}
TF_ASSIGN_OR_RETURN(
TritonGemmConfig config,
TritonGemmConfig::FromProto(backend_config.triton_gemm_config()));
TF_ASSIGN_OR_RETURN(auto analysis, TritonFusionAnalysis::Execute(
*hlo_computation, config.split_k));
TF_ASSIGN_OR_RETURN(
launch_dimensions,
GetMatMulLaunchDimensions(analysis, analysis_.fusion(), config));
}
llvm::Function* impl_fn =
ir_emitter_context.llvm_module()->getFunction(impl_fn_name);
TF_RET_CHECK(impl_fn);
llvm::Function* kernel;
std::vector<llvm_ir::IrArray> inputs;
std::vector<llvm_ir::IrArray> outputs;
TF_ASSIGN_OR_RETURN(
std::tie(kernel, inputs, outputs),
BuildKernelPrototype(ir_emitter_context, suggested_kernel_name,
kernel_arguments.args(), impl_fn->arg_size(),
launch_dimensions, &builder));
llvm::Function* prototype_func = builder.GetInsertBlock()->getParent();
prototype_func->splice(prototype_func->begin(), impl_fn);
for (const auto& [arg, ir_array] : llvm::zip(impl_fn->args(), inputs)) {
arg.replaceAllUsesWith(ir_array.GetBasePointer());
}
impl_fn->eraseFromParent();
return {{kernel->getName().str(), launch_dimensions,
triton_wrapper_result.cluster_dim,
triton_wrapper_result.shmem_bytes}};
};
auto [status_or_entry, was_cached] =
ir_emitter_context.kernel_cache().GetWithStatus(
hlo_computation, kernel_arguments.args(),
"", generate);
TF_ASSIGN_OR_RETURN(const KernelReuseCache::Entry* entry, status_or_entry);
FusionEmissionResult result;
result.thunks.emplace_back(std::make_unique<KernelThunk>(
&fusion, entry->kernel_name, kernel_arguments.args(),
entry->launch_dimensions, entry->cluster_dim, entry->shmem_bytes));
return result;
}
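// Derives launch dimensions from the block-level fusion config: one block
// per output tile (sizes ceil-divided per dimension) and
// num_warps * WarpSize() threads per block. Returns nullopt when no
// block-level config is present (e.g. for __triton_gemm fusions).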
std::optional<TritonFusion::LaunchConfig> TritonFusion::launch_config() const {
if (analysis_.fusion_backend_config().has_block_level_fusion_config()) {
BlockLevelParameters block_level_parameters =
BlockLevelParameters::FromBlockLevelFusionConfig(
analysis_.fusion_backend_config().block_level_fusion_config());
int64_t num_blocks = 1;
for (auto [dim_size, dim_tile_size] :
llvm::zip(analysis_.fusion_root(0).shape().dimensions(),
block_level_parameters.output_tile_sizes)) {
num_blocks *= (dim_size + dim_tile_size - 1) / dim_tile_size;
}
LaunchConfig launch_config;
launch_config.launch_dimensions = LaunchDimensions{
static_cast<uint64_t>(num_blocks),
static_cast<uint64_t>(block_level_parameters.num_warps * WarpSize())};
launch_config.block_level_parameters = std::move(block_level_parameters);
return launch_config;
}
return std::nullopt;
}
}
} | #include "xla/service/gpu/fusions/triton.h"
#include <memory>
#include <optional>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/fusions/fusions.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::tsl::testing::StatusIs;
class TritonFusionTest : public HloTestBase {};
TEST_F(TritonFusionTest,
TritonFusionWithBlockLevelFusionConfig_LaunchConfigIsCorrect) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
triton_computation {
param_0 = f32[125,127] parameter(0)
ROOT abs = f32[125,127] abs(param_0)
}
ENTRY entry_computation {
param_0 = f32[125,127] parameter(0)
ROOT fusion.1 = f32[125,127] fusion(param_0), kind=kCustom,
calls=triton_computation,
backend_config={"fusion_backend_config":{
"kind":"__triton",
"block_level_fusion_config":{"output_tile_sizes":["3","127"],
"num_warps":"4"}}}
})"));
stream_executor::DeviceDescription device_info =
TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
HloFusionAnalysis analysis = HloFusionAnalysis::Create(*root, device_info);
std::unique_ptr<FusionInterface> emitter =
GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis});
auto triton_fusion = dynamic_cast<TritonFusion*>(emitter.get());
ASSERT_NE(triton_fusion, nullptr);
std::optional<TritonFusion::LaunchConfig> launch_config =
triton_fusion->launch_config();
ASSERT_NE(launch_config, std::nullopt);
EXPECT_EQ(launch_config->launch_dimensions.num_blocks(),
42);
EXPECT_EQ(launch_config->launch_dimensions.num_threads_per_block(),
128);
EXPECT_THAT(launch_config->block_level_parameters.output_tile_sizes,
ElementsAre(3, 127));
}
TEST_F(TritonFusionTest,
TritonFusionWithoutBlockLevelFusionConfig_LaunchConfigIsNullopt) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
triton_computation {
param_0 = f32[125,127] parameter(0)
ROOT abs = f32[125,127] abs(param_0)
}
ENTRY entry_computation {
param_0 = f32[125,127] parameter(0)
ROOT fusion = f32[125,127] fusion(param_0), kind=kCustom,
calls=triton_computation,
backend_config={"fusion_backend_config":{"kind":"__triton"}}
})"));
stream_executor::DeviceDescription device_info =
TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
HloFusionAnalysis analysis = HloFusionAnalysis::Create(*root, device_info);
std::unique_ptr<FusionInterface> emitter =
GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis});
auto triton_fusion_emitter = dynamic_cast<TritonFusion*>(emitter.get());
ASSERT_NE(triton_fusion_emitter, nullptr);
EXPECT_EQ(triton_fusion_emitter->launch_config(), std::nullopt);
mlir::MLIRContext mlir_context;
EXPECT_THAT(triton_fusion_emitter->GenerateTritonKernelAndWrapper(
*::xla::Cast<HloFusionInstruction>(root), "random_name",
device_info, nullptr, &mlir_context),
StatusIs(absl::StatusCode::kInvalidArgument));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/triton.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/triton_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
07907015-2bf1-4447-b999-7e04d57e2766 | cpp | tensorflow/tensorflow | bfloat16_propagation | third_party/xla/xla/service/bfloat16_propagation.cc | third_party/xla/xla/service/bfloat16_propagation_test.cc | #include "xla/service/bfloat16_propagation.h"
#include "absl/algorithm/container.h"
#include "absl/cleanup/cleanup.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/map_util.h"
#include "xla/service/float_support.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_value.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
BFloat16Propagation::BFloat16Propagation(const FloatSupport* bfloat16_support)
: bfloat16_support_(bfloat16_support) {
DCHECK_EQ(bfloat16_support->LowPrecisionType(), BF16);
}
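// Propagates BF16 decisions made on a fusion's output into the fused
// computation: marks the matching subshapes of the fused root as BF16, then
// runs the backward precision pass over the fused instructions.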
void BFloat16Propagation::DetermineFusionComputationPrecision(
HloInstruction* fusion) {
CHECK_EQ(fusion->opcode(), HloOpcode::kFusion);
if (!bfloat16_support_->SupportsMixedPrecisions(*fusion)) {
return;
}
auto root = fusion->fused_instructions_computation()->root_instruction();
ShapeUtil::ForEachSubshape(
root->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
if (subshape.element_type() != F32) {
return;
}
if (OutputTypeAfterChange(fusion, index) == BF16) {
AddToOrRemoveFromBF16ChangeSet(root, index, BF16);
VLOG(2) << "Fused root " << root->ToString() << " at shape index "
<< index << " changed to BF16 precision for fusion "
<< fusion->ToString();
}
});
auto insts =
fusion->fused_instructions_computation()->MakeInstructionPostOrder();
for (auto inst_it = insts.rbegin(); inst_it != insts.rend(); ++inst_it) {
DetermineInstructionPrecision(*inst_it, false);
}
computations_visited_in_backward_pass_.insert(
fusion->fused_instructions_computation());
RevertIfFusionInternalBF16Changes(fusion);
}
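// Reverts BF16 changes that would merely insert converts inside the fusion:
// a non-parameter instruction keeps its BF16 change only if it aliases a
// root buffer that changed or at least one of its operands also changed.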
void BFloat16Propagation::RevertIfFusionInternalBF16Changes(
HloInstruction* fusion) {
auto has_changes = [this](HloInstruction* inst) {
auto it = changes_to_bf16_.find(inst);
return it != changes_to_bf16_.end() && !it->second.empty();
};
auto root = fusion->fused_instructions_computation()->root_instruction();
absl::flat_hash_set<const HloValue*> changed_root_buffers;
auto root_changes_it = changes_to_bf16_.find(root);
if (root_changes_it != changes_to_bf16_.end()) {
for (const auto& entry : root_changes_it->second) {
for (const HloValue* value :
dataflow_->GetValueSet(root, entry.second).values()) {
changed_root_buffers.insert(value);
}
}
}
auto aliases_changed_root_buffer = [this, &changed_root_buffers](
const HloInstruction* inst) {
bool aliasing = false;
ShapeUtil::ForEachSubshape(inst->shape(), [&](const Shape& subshape,
const ShapeIndex& index) {
if (aliasing) {
return;
}
if (subshape.element_type() != F32) {
return;
}
aliasing = absl::c_any_of(dataflow_->GetValueSet(inst, index).values(),
IsValueIn(changed_root_buffers));
});
return aliasing;
};
for (auto inst :
fusion->fused_instructions_computation()->MakeInstructionPostOrder()) {
if (inst->opcode() == HloOpcode::kParameter) {
continue;
}
if (aliases_changed_root_buffer(inst)) {
continue;
}
if (inst->opcode() == HloOpcode::kFusion) {
bool parameter_reverted = false;
for (int64_t i = 0; i < inst->operand_count(); ++i) {
if (has_changes(inst->mutable_operand(i))) {
continue;
}
auto* fused_parameter = inst->fused_parameter(i);
if (has_changes(fused_parameter)) {
changes_to_bf16_.erase(fused_parameter);
parameter_reverted = true;
}
}
if (parameter_reverted) {
RevertIfFusionInternalBF16Changes(inst);
}
}
if (!has_changes(inst)) {
continue;
}
bool revert_changes = true;
for (auto operand : inst->operands()) {
if (has_changes(operand)) {
revert_changes = false;
break;
}
}
if (revert_changes) {
changes_to_bf16_.erase(inst);
}
}
}
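// Mirrors BF16 decisions on a while op's output onto the body root, then
// runs the backward precision pass over the body and condition computations.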
void BFloat16Propagation::DetermineWhileComputationsPrecision(
HloInstruction* while_hlo) {
CHECK_EQ(while_hlo->opcode(), HloOpcode::kWhile);
HloComputation* body = while_hlo->while_body();
auto body_root = body->root_instruction();
HloComputation* condition = while_hlo->while_condition();
ShapeUtil::ForEachSubshape(
body_root->shape(), [this, while_hlo, body_root](
const Shape& subshape, const ShapeIndex& index) {
if (subshape.element_type() != F32) {
return;
}
if (OutputTypeAfterChange(while_hlo, index) == BF16) {
AddToOrRemoveFromBF16ChangeSet(body_root, index, BF16);
VLOG(2) << "While body root " << body_root->ToString()
<< " at shape index " << index
<< " changed to BF16 precision for while "
<< while_hlo->ToString();
}
});
auto body_insts = body->MakeInstructionPostOrder();
for (auto inst_it = body_insts.rbegin(); inst_it != body_insts.rend();
++inst_it) {
DetermineInstructionPrecision(*inst_it, false);
}
computations_visited_in_backward_pass_.insert(body);
auto condition_insts = condition->MakeInstructionPostOrder();
for (auto inst_it = condition_insts.rbegin();
inst_it != condition_insts.rend(); ++inst_it) {
DetermineInstructionPrecision(*inst_it, false);
}
computations_visited_in_backward_pass_.insert(condition);
}
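// Like the while handling above, but for conditionals: propagates BF16
// output decisions into each branch root and runs the backward pass over
// every branch computation.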
void BFloat16Propagation::DetermineConditionalComputationsPrecision(
HloInstruction* cond) {
CHECK_EQ(cond->opcode(), HloOpcode::kConditional);
for (int64_t i = 0; i < cond->branch_count(); ++i) {
auto branch = cond->branch_computation(i);
auto root = branch->root_instruction();
ShapeUtil::ForEachSubshape(
root->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
if (subshape.element_type() != F32) {
return;
}
if (OutputTypeAfterChange(cond, index) == BF16) {
AddToOrRemoveFromBF16ChangeSet(root, index, BF16);
VLOG(2) << "Conditional branch " << i << " root "
<< root->ToString() << " at shape index " << index
<< " changed to BF16 precision for conditional "
<< cond->ToString();
}
});
auto insts = branch->MakeInstructionPostOrder();
for (auto inst_it = insts.rbegin(); inst_it != insts.rend(); ++inst_it) {
DetermineInstructionPrecision(*inst_it, false);
}
computations_visited_in_backward_pass_.insert(branch);
}
}
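// Returns whether all uses of the value at (hlo, index) can consume BF16,
// taking recorded-but-unapplied precision changes into account. Values pinned
// to F32, side-effecting users, and users whose matching parameter or output
// would stay F32 all make this return false.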
bool BFloat16Propagation::AllUsersConsumeBF16(const HloInstruction& hlo,
const ShapeIndex& index) const {
const Shape& subshape = ShapeUtil::GetSubshape(hlo.shape(), index);
if (subshape.element_type() != BF16 && subshape.element_type() != F32) {
return false;
}
auto& value_set = dataflow_->GetValueSet(&hlo, index);
for (const HloValue* value : value_set.values()) {
if (ContainsKey(values_that_must_be_kept_as_f32_, value)) {
return false;
}
if (value->shape().element_type() == BF16) {
continue;
}
for (const HloUse& use : value->GetUses()) {
if (!ContainsKey(instructions_visited_in_backward_pass_,
use.instruction)) {
continue;
}
if (use.instruction->HasSideEffectNoRecurse()) {
return false;
}
if (use.instruction->opcode() == HloOpcode::kFusion) {
auto* fused_parameter =
use.instruction->fused_parameter(use.operand_number);
if (OutputTypeAfterChange(fused_parameter, use.operand_index) != BF16) {
return false;
}
continue;
} else if (use.instruction->opcode() == HloOpcode::kWhile) {
auto* cond_parameter =
use.instruction->while_condition()->parameter_instruction(
use.operand_number);
if (OutputTypeAfterChange(cond_parameter, use.operand_index) != BF16) {
return false;
}
auto* body_parameter =
use.instruction->while_body()->parameter_instruction(
use.operand_number);
if (OutputTypeAfterChange(body_parameter, use.operand_index) != BF16) {
return false;
}
continue;
} else if (use.instruction->opcode() == HloOpcode::kConditional) {
auto* cond_parameter =
use.instruction->branch_computation(use.operand_number - 1)
->parameter_instruction(0);
if (OutputTypeAfterChange(cond_parameter, use.operand_index) != BF16) {
return false;
}
continue;
}
if (bfloat16_support_->EffectiveOperandPrecisionIsLowPrecision(
*use.instruction, use.operand_number)) {
continue;
}
if (bfloat16_support_->EffectiveOperandPrecisionIsOutputPrecision(
*use.instruction, use.operand_number)) {
if (use.instruction->opcode() == HloOpcode::kTuple ||
(use.instruction->opcode() == HloOpcode::kAllReduce &&
use.instruction->shape().IsTuple())) {
ShapeIndex use_output_index{use.operand_number};
for (int64_t i : use.operand_index) {
use_output_index.push_back(i);
}
if (OutputTypeAfterChange(use.instruction, use_output_index) ==
BF16) {
continue;
}
} else if (use.instruction->opcode() == HloOpcode::kGetTupleElement) {
ShapeIndex use_output_index;
for (int64_t i = 1; i < use.operand_index.size(); ++i) {
use_output_index.push_back(use.operand_index[i]);
}
if (OutputTypeAfterChange(use.instruction, use_output_index) ==
BF16) {
continue;
}
} else {
if (OutputTypeAfterChange(use.instruction, use.operand_index) ==
BF16) {
continue;
}
}
}
return false;
}
}
return true;
}
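// Returns whether the instruction's precision must be left untouched:
// custom calls (other than AllocateBuffer), calls, bitcast-converts,
// side-effecting instructions, and custom fusions rooted in such
// instructions.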
bool BFloat16Propagation::ShouldKeepPrecisionUnchanged(
const HloInstruction* inst) {
if (inst->opcode() == HloOpcode::kFusion &&
inst->fusion_kind() == HloInstruction::FusionKind::kCustom) {
return ShouldKeepPrecisionUnchanged(
inst->fused_instructions_computation()->root_instruction());
}
return (inst->opcode() == HloOpcode::kCustomCall &&
!inst->IsCustomCall("AllocateBuffer")) ||
inst->opcode() == HloOpcode::kCall ||
inst->opcode() == HloOpcode::kBitcastConvert ||
inst->HasSideEffectNoRecurse();
}
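// The core of the backward pass: decides whether hlo's F32 outputs can be
// changed to BF16 (when all users consume BF16), and schedules the
// computations called by hlo (fusion, while, conditional) for processing once
// this instruction is done. While/conditional computations with multiple call
// sites are postponed.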
void BFloat16Propagation::DetermineInstructionPrecision(HloInstruction* hlo,
bool skip_parameters) {
bool postpone_processing_called_computations = false;
absl::Cleanup cleaner = [this, hlo,
&postpone_processing_called_computations] {
if (!postpone_processing_called_computations) {
if (hlo->opcode() == HloOpcode::kFusion) {
DetermineFusionComputationPrecision(hlo);
} else if (hlo->opcode() == HloOpcode::kWhile) {
DetermineWhileComputationsPrecision(hlo);
} else if (hlo->opcode() == HloOpcode::kConditional) {
DetermineConditionalComputationsPrecision(hlo);
}
}
instructions_visited_in_backward_pass_.insert(hlo);
};
if (hlo->opcode() == HloOpcode::kWhile &&
(caller_counts_[hlo->while_condition()] > 1 ||
caller_counts_[hlo->while_body()] > 1)) {
postpone_processing_called_computations = true;
return;
}
if (hlo->opcode() == HloOpcode::kConditional &&
absl::c_any_of(hlo->branch_computations(), [&](const HloComputation* c) {
return caller_counts_[c] > 1;
})) {
postpone_processing_called_computations = true;
return;
}
CHECK(hlo->parent() != nullptr);
if (hlo == hlo->parent()->root_instruction()) {
if (!hlo->parent()->IsFusionComputation()) {
    ShapeUtil::ForEachSubshape(hlo->shape(), [&](const Shape& /*subshape*/,
                                                 const ShapeIndex& index) {
if (OutputTypeAfterChange(hlo, index) != F32) {
return;
}
for (const auto* value : dataflow_->GetValueSet(hlo, index).values()) {
values_that_must_be_kept_as_f32_.insert(value);
}
});
}
return;
}
if (ShouldKeepPrecisionUnchanged(hlo) ||
(hlo->opcode() == HloOpcode::kParameter && skip_parameters)) {
return;
}
if (!ContainsKey(consider_using_bfloat16_, hlo)) {
return;
}
if (!bfloat16_support_->SupportsLowPrecisionOutput(*hlo)) {
return;
}
ShapeUtil::ForEachSubshape(
hlo->shape(),
      [hlo, this](const Shape& /*subshape*/, const ShapeIndex& index) {
if (OutputTypeAfterChange(hlo, index) == F32 &&
AllUsersConsumeBF16(*hlo, index)) {
AddToOrRemoveFromBF16ChangeSet(hlo, index, BF16);
VLOG(2) << "HloInstruction output at shape index " << index
<< " changed to BF16 precision: " << hlo->ToString();
}
});
}
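// Returns whether hlo may produce a BF16 output at all. If the instruction
// does not support mixed precision (and is not a tuple, get-tuple-element, or
// domain, and is not already BF16), every operand's precision must propagate
// to the output and the operand must itself be a candidate.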
bool BFloat16Propagation::InstructionIsCandidateForBF16Output(
HloInstruction* hlo) {
if (!bfloat16_support_->SupportsMixedPrecisions(*hlo) &&
hlo->opcode() != HloOpcode::kTuple &&
hlo->opcode() != HloOpcode::kGetTupleElement &&
hlo->opcode() != HloOpcode::kDomain &&
hlo->shape().element_type() != BF16) {
for (int64_t i = 0; i < hlo->operand_count(); ++i) {
if (!bfloat16_support_->EffectiveOperandPrecisionIsOutputPrecision(*hlo,
i) ||
!ContainsKey(consider_using_bfloat16_, hlo->operand(i))) {
return false;
}
}
}
return true;
}
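// Makes the parameter types of computations called by hlo (fusion, while,
// conditional) match the types of the corresponding operands after pending
// changes.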
void BFloat16Propagation::AdjustCalledComputationParameters(
HloInstruction* hlo) {
auto adjust_computation = [this, hlo](
HloComputation* computation,
absl::Span<HloInstruction* const> operands) {
CHECK_EQ(operands.size(), computation->num_parameters());
for (int64_t i = 0; i < operands.size(); ++i) {
auto parameter = computation->parameter_instruction(i);
ShapeUtil::ForEachSubshape(
parameter->shape(),
          [this, i, hlo, &operands, parameter](const Shape& /*subshape*/,
                                               const ShapeIndex& index) {
if (!ShapeUtil::IsLeafIndex(parameter->shape(), index)) {
return;
}
PrimitiveType operand_type =
OutputTypeAfterChange(operands[i], index);
if (OutputTypeAfterChange(parameter, index) == operand_type) {
return;
}
AddToOrRemoveFromBF16ChangeSet(parameter, index, operand_type);
VLOG(2) << "Called computation parameter " << parameter->ToString()
<< " at shape index " << index << " adjusted to "
<< (operand_type == BF16 ? "BF16" : "F32")
<< " to match operand in HLO " << hlo->ToString();
});
}
};
switch (hlo->opcode()) {
case HloOpcode::kFusion:
adjust_computation(hlo->fused_instructions_computation(),
hlo->operands());
break;
case HloOpcode::kWhile:
adjust_computation(hlo->while_condition(), hlo->operands());
adjust_computation(hlo->while_body(), hlo->operands());
break;
case HloOpcode::kConditional:
for (int64_t i = 0; i < hlo->branch_count(); ++i) {
adjust_computation(hlo->branch_computation(i),
{hlo->mutable_operand(i + 1)});
}
break;
default:
break;
}
}
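// Makes the root types of computations called by hlo match hlo's own output
// types after pending changes; subshapes forced back to F32 are recorded so
// they are not flipped again.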
void BFloat16Propagation::AdjustCalledComputationRoot(HloInstruction* hlo) {
auto adjust_computation = [this, hlo](HloComputation* computation,
HloInstruction* output) {
HloInstruction* root = computation->root_instruction();
ShapeUtil::ForEachSubshape(root->shape(), [this, hlo, root, output](
                                                  const Shape& /*subshape*/,
                                                  const ShapeIndex& index) {
if (!ShapeUtil::IsLeafIndex(hlo->shape(), index)) {
return;
}
const PrimitiveType output_type = OutputTypeAfterChange(output, index);
if (OutputTypeAfterChange(root, index) == output_type) {
return;
}
AddToOrRemoveFromBF16ChangeSet(root, index, output_type);
if (output_type == F32) {
for (const auto* value : dataflow_->GetValueSet(root, index).values()) {
values_that_must_be_kept_as_f32_.insert(value);
}
}
VLOG(2) << "Called computation root " << root->ToString()
<< " at shape index " << index << " adjusted to "
<< (output_type == BF16 ? "BF16" : "F32")
<< " to match output shape of " << hlo->ToString();
});
};
switch (hlo->opcode()) {
case HloOpcode::kFusion:
adjust_computation(hlo->fused_instructions_computation(), hlo);
break;
case HloOpcode::kWhile:
adjust_computation(hlo->while_body(), hlo);
break;
case HloOpcode::kConditional:
for (auto* branch : hlo->branch_computations()) {
adjust_computation(branch, hlo);
}
break;
default:
break;
}
}
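// Runs a fixed-point iteration over the computation in reverse post order,
// forcing aliased dataflow values (including in-place input/output pairs) to
// a consistent element type, with F32 winning over BF16. Recurses into called
// computations and returns whether any parameter type was changed.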
bool BFloat16Propagation::ResolveInconsistencyOfAliasingBuffersHelper(
HloComputation* computation,
absl::flat_hash_set<const HloComputation*>* visited_computations) {
bool parameter_changed = false;
auto insts = computation->MakeInstructionPostOrder();
while (true) {
bool any_change = false;
for (auto inst_it = insts.rbegin(); inst_it != insts.rend(); ++inst_it) {
auto hlo = *inst_it;
      auto adjust_hlo_output = [&](const Shape& /*subshape*/,
                                   const ShapeIndex& index) {
const PrimitiveType output_type = OutputTypeAfterChange(hlo, index);
VLOG(2) << "output_type is " << ((output_type == BF16) ? "BF16" : "F32")
<< " for :" << hlo->ToString() << "\n";
if (output_type != F32 && output_type != BF16) {
return;
}
PrimitiveType type = BF16;
for (const auto* value : dataflow_->GetValueSet(hlo, index).values()) {
auto value_type = ValueTypeAfterChange(value);
if (value_type == BF16) {
continue;
}
VLOG(2) << "Adjust to F32 due to aliased dataflow value: "
<< value->ToString() << "\n";
CHECK_EQ(value_type, F32);
type = F32;
break;
}
for (const auto& operand_and_output_index :
HloDataflowAnalysis::GetInPlaceInputOutputPairs(hlo)) {
if (operand_and_output_index.second == index) {
const HloOperandIndex& operand_index =
operand_and_output_index.first;
for (const auto* value :
dataflow_
->GetValueSet(hlo->operand(operand_index.operand_number),
operand_index.operand_index)
.values()) {
auto value_type = ValueTypeAfterChange(value);
if (value_type == BF16) {
continue;
}
VLOG(2) << "Adjust to F32 due to InputOutPair: "
<< value->ToString() << "\n";
CHECK_EQ(value_type, F32);
type = F32;
break;
}
}
}
if (type == BF16 && !AllUsersConsumeBF16(*hlo, index)) {
VLOG(2) << "Adjust to F32 due to All user consumeBF16 fail\n";
type = F32;
}
if (type == F32) {
for (const auto* value :
dataflow_->GetValueSet(hlo, index).values()) {
values_that_must_be_kept_as_f32_.insert(value);
}
}
if (type != output_type) {
any_change = true;
AddToOrRemoveFromBF16ChangeSet(hlo, index, type);
VLOG(2) << "HloInstruction output at shape index " << index
<< " adjusted to " << (type == BF16 ? "BF16" : "F32") << ": "
<< hlo->ToString();
if (hlo->opcode() == HloOpcode::kParameter) {
parameter_changed = true;
}
}
};
ShapeUtil::ForEachSubshape(hlo->shape(), adjust_hlo_output);
AdjustCalledComputationRoot(hlo);
if (hlo->opcode() == HloOpcode::kWhile) {
absl::flat_hash_set<const HloComputation*> visited_in_while;
while (ResolveInconsistencyOfAliasingBuffersHelper(
hlo->while_condition(), &visited_in_while) ||
ResolveInconsistencyOfAliasingBuffersHelper(hlo->while_body(),
&visited_in_while)) {
visited_in_while.clear();
ShapeUtil::ForEachSubshape(hlo->shape(), adjust_hlo_output);
AdjustCalledComputationRoot(hlo);
}
visited_computations->insert(visited_in_while.begin(),
visited_in_while.end());
} else if (hlo->opcode() == HloOpcode::kFusion) {
ResolveInconsistencyOfAliasingBuffersHelper(
hlo->fused_instructions_computation(), visited_computations);
} else if (hlo->opcode() == HloOpcode::kConditional) {
for (auto* branch : hlo->branch_computations()) {
ResolveInconsistencyOfAliasingBuffersHelper(branch,
visited_computations);
}
}
}
if (!any_change) {
break;
}
}
for (auto inst_it = insts.rbegin(); inst_it != insts.rend(); ++inst_it) {
AdjustCalledComputationParameters(*inst_it);
}
return parameter_changed;
}
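// Resolves type inconsistencies between aliased buffers across the whole
// module, visiting computations in reverse topological order.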
void BFloat16Propagation::ResolveInconsistencyOfAliasingBuffers(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
const auto& computations_topological_order =
module->MakeComputationPostOrder(execution_threads);
absl::flat_hash_set<const HloComputation*> resolved;
for (auto comp_it = computations_topological_order.rbegin();
comp_it != computations_topological_order.rend(); ++comp_it) {
if (ContainsKey(resolved, *comp_it)) {
continue;
}
ResolveInconsistencyOfAliasingBuffersHelper(*comp_it, &resolved);
}
}
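// After precision changes, a fusion root's shape may no longer match the
// fusion instruction's shape; this inserts converts at the mismatching leaves
// of the fusion root via a deep copy.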
absl::Status BFloat16Propagation::ResolveInconsistentFusions(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
for (auto computation : module->MakeComputationPostOrder(execution_threads)) {
auto insts = computation->MakeInstructionPostOrder();
for (auto inst_it = insts.rbegin(); inst_it != insts.rend(); ++inst_it) {
auto hlo = *inst_it;
if (hlo->opcode() != HloOpcode::kFusion) {
continue;
}
auto fusion_computation = hlo->fused_instructions_computation();
auto fusion_root = fusion_computation->root_instruction();
if (ShapeUtil::Compatible(fusion_root->shape(), hlo->shape())) {
continue;
}
ShapeTree<HloInstruction*> converted_outputs(hlo->shape());
TF_ASSIGN_OR_RETURN(
HloInstruction * copy,
fusion_computation->DeepCopyInstructionWithCustomCopier(
fusion_root,
[hlo](HloInstruction* leaf, const ShapeIndex& leaf_index,
HloComputation* comp) {
const Shape& hlo_subshape =
ShapeUtil::GetSubshape(hlo->shape(), leaf_index);
if (ShapeUtil::Compatible(leaf->shape(), hlo_subshape)) {
return leaf;
}
return comp->AddInstruction(
HloInstruction::CreateConvert(hlo_subshape, leaf));
}));
fusion_computation->set_root_instruction(copy);
}
}
return absl::OkStatus();
}
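// Constant shapes may have been changed without updating the backing
// literals; replace such constants with new ones whose literals are converted
// to the new shapes.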
absl::Status BFloat16Propagation::ResolveConvertedConstants(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
for (auto computation : module->MakeComputationPostOrder(execution_threads)) {
for (auto hlo : computation->MakeInstructionPostOrder()) {
if (hlo->opcode() != HloOpcode::kConstant) {
continue;
}
if (!Shape::Equal().MinorToMajorOnlyInLayout()(hlo->literal().shape(),
hlo->shape())) {
TF_ASSIGN_OR_RETURN(auto converted_literal,
hlo->literal().ConvertToShape(hlo->shape()));
auto new_constant = computation->AddInstruction(
HloInstruction::CreateConstant(std::move(converted_literal)));
UpdateLayout(new_constant->mutable_shape());
TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(new_constant));
}
}
}
return absl::OkStatus();
}
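// Removes kConvert instructions whose operand already has the same shape;
// such no-op converts are inserted earlier as copies and cleaned up here.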
absl::Status BFloat16Propagation::SkipNoopConversions(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
for (auto computation : module->computations(execution_threads)) {
for (auto hlo : computation->MakeInstructionPostOrder()) {
if (hlo->opcode() != HloOpcode::kConvert) {
continue;
}
auto source = hlo->mutable_operand(0);
if (!ShapeUtil::Equal(source->shape(), hlo->shape())) {
continue;
}
const bool is_root = hlo == computation->root_instruction();
TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(source));
if (is_root) {
computation->set_root_instruction(source);
}
}
}
return absl::OkStatus();
}
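// Entry point of the pass: runs the forward candidate-collection pass, the
// backward precision-determination pass, resolves inconsistencies between
// aliased buffers, applies the recorded BF16 changes, and cleans up.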
absl::StatusOr<bool> BFloat16Propagation::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
consider_using_bfloat16_.clear();
instructions_visited_in_backward_pass_.clear();
computations_visited_in_backward_pass_.clear();
values_that_must_be_kept_as_f32_.clear();
caller_counts_.clear();
changes_to_bf16_.clear();
changed_ = false;
auto computations_topological_order =
module->MakeComputationPostOrder(execution_threads);
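  // Insert no-op converts as copies of each while init operand, so that the
  // loop state can change precision independently of other users of the init
  // value; leftover no-op converts are removed later by SkipNoopConversions.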
for (auto computation : computations_topological_order) {
for (auto inst : computation->MakeInstructionPostOrder()) {
if (inst->opcode() != HloOpcode::kWhile) {
continue;
}
auto operand = inst->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * copy,
computation->DeepCopyInstructionWithCustomCopier(
operand, [](HloInstruction* leaf, const ShapeIndex& leaf_index,
HloComputation* comp) {
if (leaf->shape().element_type() != F32) {
return leaf;
}
return comp->AddInstruction(
HloInstruction::CreateConvert(leaf->shape(), leaf));
}));
TF_RETURN_IF_ERROR(operand->ReplaceUseWith(inst, copy));
}
}
TF_ASSIGN_OR_RETURN(dataflow_, HloDataflowAnalysis::Run(*module));
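  // Forward pass: collect instructions that are candidates for BF16 output.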
for (auto computation : computations_topological_order) {
for (auto inst : computation->MakeInstructionPostOrder()) {
if (InstructionIsCandidateForBF16Output(inst)) {
consider_using_bfloat16_.insert(inst);
}
}
}
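  // Backward pass: visit computations and instructions in reverse topological
  // order and determine which F32 outputs can be changed to BF16.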
for (auto comp_it = computations_topological_order.rbegin();
comp_it != computations_topological_order.rend(); ++comp_it) {
if (ContainsKey(computations_visited_in_backward_pass_, *comp_it)) {
continue;
}
auto insts = (*comp_it)->MakeInstructionPostOrder();
for (auto inst_it = insts.rbegin(); inst_it != insts.rend(); ++inst_it) {
      DetermineInstructionPrecision(*inst_it, /*skip_parameters=*/true);
}
computations_visited_in_backward_pass_.insert(*comp_it);
}
ResolveInconsistencyOfAliasingBuffers(module, execution_threads);
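  // Apply the recorded changes. Instructions whose precision must stay
  // unchanged get a converted deep copy inserted after them instead of having
  // their shapes mutated in place.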
for (auto& change : changes_to_bf16_) {
auto inst = change.first;
if (ShouldKeepPrecisionUnchanged(inst)) {
auto users = inst->users();
bool is_root = inst == inst->parent()->root_instruction();
TF_ASSIGN_OR_RETURN(
HloInstruction * copy,
inst->parent()->DeepCopyInstructionWithCustomCopier(
inst, [&](HloInstruction* leaf, const ShapeIndex& leaf_index,
HloComputation* comp) {
if (!ContainsKey(change.second,
ShapeUtil::GetMutableSubshape(
inst->mutable_shape(), leaf_index))) {
return leaf;
}
auto converted_shape =
ShapeUtil::ChangeElementType(leaf->shape(), BF16);
UpdateLayout(&converted_shape);
return comp->AddInstruction(
HloInstruction::CreateConvert(converted_shape, leaf));
}));
for (auto user : users) {
TF_RETURN_IF_ERROR(inst->ReplaceUseWithDifferentShape(user, copy));
}
if (is_root) {
        inst->parent()->set_root_instruction(copy,
                                             /*accept_different_shape=*/true);
}
continue;
}
for (const auto& entry : change.second) {
auto subshape = entry.first;
CHECK_EQ(subshape->element_type(), F32);
subshape->set_element_type(BF16);
UpdateLayout(subshape);
changed_ = true;
}
}
auto clean_up = [this, module, &execution_threads]() {
TF_RETURN_IF_ERROR(SkipNoopConversions(module, execution_threads));
TupleSimplifier tuple_simplifier;
TF_RETURN_IF_ERROR(
tuple_simplifier.Run(module, execution_threads).status());
HloDCE dce;
TF_RETURN_IF_ERROR(dce.Run(module, execution_threads).status());
return absl::OkStatus();
};
if (!changed_) {
TF_RETURN_IF_ERROR(clean_up());
return false;
}
TF_RETURN_IF_ERROR(ResolveInconsistentFusions(module, execution_threads));
TF_RETURN_IF_ERROR(ResolveConvertedConstants(module, execution_threads));
TF_RETURN_IF_ERROR(clean_up());
return true;
}
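// Returns the element type of hlo's output at the given shape index, taking
// recorded-but-unapplied BF16 changes into account.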
PrimitiveType BFloat16Propagation::OutputTypeAfterChange(
HloInstruction* hlo, const ShapeIndex& index) const {
Shape* subshape = ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), index);
const PrimitiveType type_on_hlo = subshape->element_type();
if (type_on_hlo != F32) {
return type_on_hlo;
}
auto it = changes_to_bf16_.find(hlo);
if (it == changes_to_bf16_.end()) {
return type_on_hlo;
}
return ContainsKey(it->second, subshape) ? BF16 : F32;
}
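// Returns the element type of the given dataflow value after pending changes,
// looked up at its defining position.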
PrimitiveType BFloat16Propagation::ValueTypeAfterChange(
const HloValue* value) const {
auto hlo = value->defining_instruction();
const auto& position = value->defining_position();
return OutputTypeAfterChange(hlo, position.index);
}
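// Records that the subshape of hlo at index should become BF16, or removes a
// previously recorded change when the target type is F32.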
void BFloat16Propagation::AddToOrRemoveFromBF16ChangeSet(
HloInstruction* hlo, const ShapeIndex& index, PrimitiveType target_type) {
if (target_type == BF16) {
auto& entry = changes_to_bf16_[hlo];
entry.emplace(ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), index),
index);
} else {
CHECK_EQ(target_type, F32);
auto it = changes_to_bf16_.find(hlo);
if (it == changes_to_bf16_.end()) {
return;
}
it->second.erase(
ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), index));
if (it->second.empty()) {
changes_to_bf16_.erase(it);
}
}
}
} | #include "xla/service/bfloat16_propagation.h"
#include <cstdint>
#include <memory>
#include <string>
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/float_support.h"
#include "xla/service/hlo_verifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
class TestBFloat16Support : public FloatSupport {
public:
TestBFloat16Support() : FloatSupport(BF16) {}
~TestBFloat16Support() override {}
bool SupportsLowPrecisionOperand(const HloInstruction& hlo,
int64_t operand_index) const override {
return true;
}
bool SupportsLowPrecisionOutput(const HloInstruction& hlo) const override {
return true;
}
bool SupportsMixedPrecisions(const HloInstruction& hlo) const override {
return true;
}
bool EffectiveOperandPrecisionIsLowPrecision(
const HloInstruction& hlo, int64_t operand_index) const override {
return hlo.opcode() == HloOpcode::kDot;
}
};
class BFloat16PropagationTest : public HloTestBase {
protected:
BFloat16PropagationTest()
      : HloTestBase(/*verifier_layout_sensitive=*/false,
                    /*allow_mixed_precision_in_hlo_verifier=*/true) {}
bool PropagatePrecision(HloModule* module) {
TestBFloat16Support bfloat16_support;
BFloat16Propagation propagation(&bfloat16_support);
absl::StatusOr<bool> result = propagation.Run(module);
EXPECT_IS_OK(result.status());
return result.value();
}
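  // Returns whether inst outputs BF16, either directly or through a single
  // convert-to-BF16 user.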
bool OutputsBF16(const HloInstruction* inst) {
if (inst->shape().element_type() == BF16) {
return true;
}
return inst->user_count() == 1 &&
inst->users()[0]->opcode() == HloOpcode::kConvert &&
inst->users()[0]->shape().element_type() == BF16;
}
std::unique_ptr<HloInstruction> CreateDot(const Shape& shape,
HloInstruction* lhs,
HloInstruction* rhs) {
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
return HloInstruction::CreateDot(shape, lhs, rhs, dot_dnums,
DefaultPrecisionConfig(2));
}
};
TEST_F(BFloat16PropagationTest, PropagateThroughSelectButNotAdd) {
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 4});
HloInstruction* a =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* c =
builder.AddInstruction(HloInstruction::CreateParameter(2, shape, "c"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a, b));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, add0, b));
HloInstruction* pred = builder.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {2, 4}), a, b, ComparisonDirection::kEq));
HloInstruction* sel = builder.AddInstruction(
HloInstruction::CreateTernary(shape, HloOpcode::kSelect, pred, c, add1));
HloInstruction* xpose =
builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {4, 2}), sel, {1, 0}));
HloInstruction* dot = builder.AddInstruction(
CreateDot(ShapeUtil::MakeShape(F32, {4, 4}), xpose, a));
HloInstruction* root = builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {4, 4}), HloOpcode::kAdd, dot, dot));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), root);
EXPECT_TRUE(OutputsBF16(xpose));
EXPECT_TRUE(OutputsBF16(sel));
EXPECT_TRUE(OutputsBF16(add1));
EXPECT_FALSE(OutputsBF16(add0));
EXPECT_FALSE(OutputsBF16(a));
EXPECT_FALSE(OutputsBF16(b));
EXPECT_FALSE(OutputsBF16(c));
}
TEST_F(BFloat16PropagationTest, PropagateThroughMaxPoolReduceWindow) {
auto module = CreateNewVerifiedModule();
auto sub_builder = HloComputation::Builder("max");
HloInstruction* p0 = sub_builder.AddInstruction(
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "a"));
HloInstruction* p1 = sub_builder.AddInstruction(
HloInstruction::CreateParameter(1, ShapeUtil::MakeShape(F32, {}), "b"));
sub_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kMaximum, p0, p1));
auto max_computation = module->AddEmbeddedComputation(sub_builder.Build());
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 4});
HloInstruction* a =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* c =
builder.AddInstruction(HloInstruction::CreateParameter(2, shape, "c"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a, b));
Window window;
WindowDimension dim;
dim.set_size(2);
dim.set_stride(1);
dim.set_padding_high(1);
dim.set_window_dilation(1);
dim.set_base_dilation(1);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
HloInstruction* rw =
builder.AddInstruction(HloInstruction::CreateReduceWindow(
shape, add,
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(F32))),
window, max_computation));
HloInstruction* xpose =
builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {4, 2}), c, {1, 0}));
HloInstruction* dot = builder.AddInstruction(
CreateDot(ShapeUtil::MakeShape(F32, {4, 4}), xpose, rw));
HloInstruction* root = builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {4, 4}), HloOpcode::kAdd, dot, dot));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), root);
EXPECT_TRUE(OutputsBF16(add));
EXPECT_TRUE(OutputsBF16(xpose));
EXPECT_TRUE(OutputsBF16(rw));
}
TEST_F(BFloat16PropagationTest, DoNotChangeAllReduce) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* a =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
auto rb = HloComputation::Builder(TestName());
rb.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd,
rb.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0")),
rb.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"))));
auto reduction = module->AddEmbeddedComputation(rb.Build());
HloInstruction* all_reduce =
builder.AddInstruction(HloInstruction::CreateAllReduce(
ShapeUtil::MakeTupleShape({shape, shape}), {a, b}, reduction,
          CollectiveDeviceList(), /*constrain_layout=*/false,
          /*channel_id=*/1, /*use_global_device_ids=*/false));
HloInstruction* gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, all_reduce, 0));
HloInstruction* gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, all_reduce, 1));
HloInstruction* dot = builder.AddInstruction(CreateDot(shape, gte0, gte1));
HloInstruction* root = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, dot, dot));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), root);
}
TEST_F(BFloat16PropagationTest, ConvertConstantLiteral) {
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
Array2D<float> array_a(4, 4);
array_a.FillUnique(1.0f);
Array2D<float> array_b(4, 4);
array_b.FillUnique(10.0f);
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateFromArray(array_a)));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateFromArray(array_b)));
HloInstruction* dot = builder.AddInstruction(CreateDot(shape, a, b));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), dot);
EXPECT_TRUE(OutputsBF16(dot->operand(0)));
EXPECT_TRUE(OutputsBF16(dot->operand(1)));
EXPECT_EQ(dot->operand(0)->opcode(), HloOpcode::kConstant);
EXPECT_EQ(dot->operand(1)->opcode(), HloOpcode::kConstant);
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::ConvertF32ToBF16(LiteralUtil::CreateFromArray(array_a)),
dot->operand(0)->literal()));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::ConvertF32ToBF16(LiteralUtil::CreateFromArray(array_b)),
dot->operand(1)->literal()));
}
TEST_F(BFloat16PropagationTest, PropagateThroughTuples) {
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 4});
HloInstruction* a =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a, b));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a, a));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, b, b));
HloInstruction* xpose =
builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {4, 2}), add1, {1, 0}));
HloInstruction* tuple0 =
builder.AddInstruction(HloInstruction::CreateTuple({add0, add1, add2}));
HloInstruction* tuple1 =
builder.AddInstruction(HloInstruction::CreateTuple({tuple0, xpose}));
HloInstruction* lhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(xpose->shape(), tuple1, 1));
HloInstruction* rhs =
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
add0->shape(),
builder.AddInstruction(HloInstruction::CreateGetTupleElement(
tuple0->shape(), tuple1, 0)),
0));
HloInstruction* dot = builder.AddInstruction(
CreateDot(ShapeUtil::MakeShape(F32, {4, 4}), lhs, rhs));
HloInstruction* output_tuple =
builder.AddInstruction(HloInstruction::CreateTuple({dot, add2}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), output_tuple);
EXPECT_TRUE(OutputsBF16(xpose));
EXPECT_TRUE(OutputsBF16(add0));
EXPECT_TRUE(OutputsBF16(add1));
EXPECT_FALSE(OutputsBF16(add2));
}
TEST_F(BFloat16PropagationTest, SameValueReferencedTwice) {
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 4});
HloInstruction* a =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a, b));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a, a));
HloInstruction* lhs = builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {4, 2}), add1, {1, 0}));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({add0, add1}));
HloInstruction* rhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(add1->shape(), tuple, 1));
HloInstruction* dot = builder.AddInstruction(
CreateDot(ShapeUtil::MakeShape(F32, {4, 4}), lhs, rhs));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), dot);
EXPECT_TRUE(OutputsBF16(add1));
EXPECT_TRUE(OutputsBF16(lhs));
}
TEST_F(BFloat16PropagationTest, DoNotChangeComputationRoot) {
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* a =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a, b));
HloInstruction* dot = builder.AddInstruction(CreateDot(shape, add, add));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({add, dot}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), tuple);
EXPECT_FALSE(OutputsBF16(add));
}
TEST_F(BFloat16PropagationTest, PropagateThroughFusion) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param, param));
auto builder_f0 = HloComputation::Builder("fusion0");
HloInstruction* a_f0 =
builder_f0.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b_f0 =
builder_f0.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* tuple_f0 =
builder_f0.AddInstruction(HloInstruction::CreateTuple({a_f0, b_f0}));
auto comp_f0 = module->AddEmbeddedComputation(builder_f0.Build());
auto fusion0 = builder.AddInstruction(HloInstruction::CreateFusion(
tuple_f0->shape(), HloInstruction::FusionKind::kCustom, {add, add},
comp_f0));
auto builder_f1 = HloComputation::Builder("fusion1");
HloInstruction* p_f1 = builder_f1.AddInstruction(
HloInstruction::CreateParameter(0, tuple_f0->shape(), "param"));
HloInstruction* a_f1 = builder_f1.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p_f1, 0));
HloInstruction* b_f1 = builder_f1.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p_f1, 1));
HloInstruction* dot = builder_f1.AddInstruction(CreateDot(shape, a_f1, b_f1));
auto comp_f1 = module->AddEmbeddedComputation(builder_f1.Build());
auto fusion1 = builder.AddInstruction(HloInstruction::CreateFusion(
dot->shape(), HloInstruction::FusionKind::kCustom, {fusion0}, comp_f1));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), fusion1);
EXPECT_TRUE(OutputsBF16(add));
EXPECT_TRUE(OutputsBF16(a_f0));
EXPECT_TRUE(OutputsBF16(b_f0));
EXPECT_TRUE(OutputsBF16(a_f1));
EXPECT_TRUE(OutputsBF16(b_f1));
}
TEST_F(BFloat16PropagationTest, FusionWithBitcastConvertRoot) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape u32_shape = ShapeUtil::MakeShape(U32, {4, 4});
Shape f32_shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, u32_shape, "param"));
auto builder_f = HloComputation::Builder("fusion");
HloInstruction* a_f = builder_f.AddInstruction(
HloInstruction::CreateParameter(0, u32_shape, "a"));
HloInstruction* bc_f = builder_f.AddInstruction(
HloInstruction::CreateBitcastConvert(f32_shape, a_f));
auto comp_f = module->AddEmbeddedComputation(builder_f.Build());
auto fusion = builder.AddInstruction(HloInstruction::CreateFusion(
f32_shape, HloInstruction::FusionKind::kLoop, {param}, comp_f));
auto dot = builder.AddInstruction(CreateDot(f32_shape, fusion, fusion));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), dot);
EXPECT_EQ(bc_f->shape(), f32_shape);
EXPECT_TRUE(OutputsBF16(bc_f));
}
TEST_F(BFloat16PropagationTest, DiscardFusionInternalBF16Changes) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param, param));
auto builder_f = HloComputation::Builder("fusion");
HloInstruction* a_f =
builder_f.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b_f =
builder_f.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* add_f = builder_f.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a_f, b_f));
HloInstruction* dot_f =
builder_f.AddInstruction(CreateDot(shape, add_f, add_f));
auto comp_f = module->AddEmbeddedComputation(builder_f.Build());
auto fusion = builder.AddInstruction(HloInstruction::CreateFusion(
dot_f->shape(), HloInstruction::FusionKind::kCustom, {add, add}, comp_f));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), fusion);
}
TEST_F(BFloat16PropagationTest, ConvertTupleFusionElementIfUsedByAdd) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param, param));
auto builder_f = HloComputation::Builder("fusion0");
HloInstruction* a_f =
builder_f.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b_f =
builder_f.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* add_f = builder_f.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, a_f, b_f));
HloInstruction* tuple_f =
builder_f.AddInstruction(HloInstruction::CreateTuple({a_f, add_f}));
auto comp_f = module->AddEmbeddedComputation(builder_f.Build());
auto fusion = builder.AddInstruction(HloInstruction::CreateFusion(
tuple_f->shape(), HloInstruction::FusionKind::kCustom, {add, add},
comp_f));
HloInstruction* gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion, 0));
HloInstruction* gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion, 1));
HloInstruction* dot = builder.AddInstruction(CreateDot(shape, gte0, gte1));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), dot);
EXPECT_TRUE(OutputsBF16(gte0));
EXPECT_TRUE(OutputsBF16(gte1));
EXPECT_FALSE(OutputsBF16(a_f));
EXPECT_FALSE(OutputsBF16(b_f));
EXPECT_TRUE(OutputsBF16(add_f));
auto new_fusion_root = comp_f->root_instruction();
EXPECT_EQ(new_fusion_root->opcode(), HloOpcode::kTuple);
EXPECT_EQ(new_fusion_root->operand(1), add_f);
EXPECT_EQ(new_fusion_root->operand(0)->opcode(), HloOpcode::kConvert);
EXPECT_TRUE(OutputsBF16(new_fusion_root->operand(0)));
}
TEST_F(BFloat16PropagationTest, PropagateThroughSimpleWhile) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param0"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "param1"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
auto builder_cond = HloComputation::Builder("cond");
auto cond_param = builder_cond.AddInstruction(
HloInstruction::CreateParameter(0, shape, "cond_param"));
auto cond_dot =
builder_cond.AddInstruction(CreateDot(shape, cond_param, cond_param));
auto cond_root = builder_cond.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}),
builder_cond.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond.AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(F32, {1, 1}),
cond_dot, {0, 0}, {1, 1}, {1, 1})))),
builder_cond.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond.AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(F32, {1, 1}),
cond_dot, {1, 1}, {2, 2}, {1, 1})))),
ComparisonDirection::kGt));
auto cond = module->AddEmbeddedComputation(builder_cond.Build());
auto builder_body = HloComputation::Builder("body");
auto body_param = builder_body.AddInstruction(
HloInstruction::CreateParameter(0, shape, "body_param"));
auto body_dot =
builder_body.AddInstruction(CreateDot(shape, body_param, body_param));
auto body = module->AddEmbeddedComputation(builder_body.Build());
auto while_hlo = builder.AddInstruction(
HloInstruction::CreateWhile(shape, cond, body, add));
auto dot = builder.AddInstruction(CreateDot(shape, while_hlo, while_hlo));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), dot);
EXPECT_TRUE(
ShapeUtil::Equal(cond_root->shape(), ShapeUtil::MakeShape(PRED, {})));
EXPECT_TRUE(OutputsBF16(add));
EXPECT_TRUE(OutputsBF16(body_dot));
EXPECT_TRUE(OutputsBF16(body_param));
EXPECT_TRUE(OutputsBF16(cond_param));
EXPECT_FALSE(OutputsBF16(dot));
}
TEST_F(BFloat16PropagationTest,
ConditionPreventsPropagationForFusionInsideWhile) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param0"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "param1"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
auto builder_cond = HloComputation::Builder("cond");
auto cond_param = builder_cond.AddInstruction(
HloInstruction::CreateParameter(0, shape, "cond_param"));
builder_cond.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}),
builder_cond.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond.AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(F32, {1, 1}), cond_param, {0, 0}, {1, 1},
{1, 1})))),
builder_cond.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond.AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(F32, {1, 1}), cond_param, {1, 1}, {2, 2},
{1, 1})))),
ComparisonDirection::kGt));
auto cond = module->AddEmbeddedComputation(builder_cond.Build());
auto builder_body = HloComputation::Builder("body");
auto body_param = builder_body.AddInstruction(
HloInstruction::CreateParameter(0, shape, "body_param"));
auto body_transpose = builder_body.AddInstruction(
HloInstruction::CreateTranspose(shape, body_param, {0, 1}));
auto builder_f = HloComputation::Builder("fusion");
HloInstruction* a_f =
builder_f.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
builder_f.AddInstruction(HloInstruction::CreateTranspose(shape, a_f, {0, 1}));
auto comp_f = module->AddEmbeddedComputation(builder_f.Build());
auto body_fusion = builder_body.AddInstruction(HloInstruction::CreateFusion(
shape, HloInstruction::FusionKind::kCustom, {body_transpose}, comp_f));
auto body = module->AddEmbeddedComputation(builder_body.Build());
auto while_hlo = builder.AddInstruction(
HloInstruction::CreateWhile(shape, cond, body, add));
auto dot = builder.AddInstruction(CreateDot(shape, while_hlo, while_hlo));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_FALSE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), dot);
EXPECT_FALSE(OutputsBF16(add));
EXPECT_FALSE(OutputsBF16(body_fusion));
EXPECT_FALSE(OutputsBF16(body_param));
EXPECT_FALSE(OutputsBF16(body_transpose));
EXPECT_FALSE(OutputsBF16(a_f));
}
TEST_F(BFloat16PropagationTest, PropagateThroughTupleWhile) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param0"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "param1"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({add0, add1}));
auto builder_cond = HloComputation::Builder("cond");
auto cond_param = builder_cond.AddInstruction(
HloInstruction::CreateParameter(0, tuple->shape(), "cond_param"));
auto cond_lhs = builder_cond.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, cond_param, 0));
auto cond_rhs = builder_cond.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, cond_param, 1));
auto cond_add_rhs = builder_cond.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, cond_rhs, cond_rhs));
auto cond_dot =
builder_cond.AddInstruction(CreateDot(shape, cond_lhs, cond_add_rhs));
builder_cond.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}),
builder_cond.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond.AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(F32, {1, 1}),
cond_dot, {0, 0}, {1, 1}, {1, 1})))),
builder_cond.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond.AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(F32, {1, 1}),
cond_dot, {1, 1}, {2, 2}, {1, 1})))),
ComparisonDirection::kGt));
auto cond = module->AddEmbeddedComputation(builder_cond.Build());
auto builder_body = HloComputation::Builder("body");
auto body_param = builder_body.AddInstruction(
HloInstruction::CreateParameter(0, tuple->shape(), "body_param"));
auto body_lhs = builder_body.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, body_param, 0));
auto body_rhs = builder_body.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, body_param, 1));
auto body_dot1 =
builder_body.AddInstruction(CreateDot(shape, body_lhs, body_rhs));
auto body_dot2 =
builder_body.AddInstruction(CreateDot(shape, body_rhs, body_lhs));
auto body_transpose = builder_body.AddInstruction(
HloInstruction::CreateTranspose(shape, body_dot2, {0, 1}));
builder_body.AddInstruction(
HloInstruction::CreateTuple({body_dot1, body_transpose}));
auto body = module->AddEmbeddedComputation(builder_body.Build());
auto while_hlo = builder.AddInstruction(
HloInstruction::CreateWhile(tuple->shape(), cond, body, tuple));
auto lhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, while_hlo, 0));
auto rhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, while_hlo, 1));
auto dot = builder.AddInstruction(CreateDot(shape, lhs, rhs));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), dot);
EXPECT_TRUE(OutputsBF16(lhs));
EXPECT_FALSE(OutputsBF16(rhs));
EXPECT_TRUE(OutputsBF16(body_dot1));
EXPECT_TRUE(OutputsBF16(body_lhs));
EXPECT_FALSE(OutputsBF16(body_rhs));
EXPECT_FALSE(OutputsBF16(body_dot2));
EXPECT_FALSE(OutputsBF16(body_transpose));
EXPECT_TRUE(OutputsBF16(cond_lhs));
EXPECT_FALSE(OutputsBF16(cond_rhs));
EXPECT_TRUE(OutputsBF16(add0));
EXPECT_FALSE(OutputsBF16(add1));
}
TEST_F(BFloat16PropagationTest, DoNotPropagateWhilesCallingSameComputation) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param0"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "param1"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
HloInstruction* add3 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, param1));
HloInstruction* tuple0 =
builder.AddInstruction(HloInstruction::CreateTuple({add0, add1}));
HloInstruction* tuple1 =
builder.AddInstruction(HloInstruction::CreateTuple({add2, add3}));
auto builder_cond0 = HloComputation::Builder("cond0");
auto cond0_param = builder_cond0.AddInstruction(
HloInstruction::CreateParameter(0, tuple0->shape(), "cond0_param"));
auto cond0_lhs = builder_cond0.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, cond0_param, 0));
auto cond0_rhs = builder_cond0.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, cond0_param, 1));
auto cond0_add_rhs =
builder_cond0.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, cond0_rhs, cond0_rhs));
auto cond0_dot =
builder_cond0.AddInstruction(CreateDot(shape, cond0_lhs, cond0_add_rhs));
builder_cond0.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}),
builder_cond0.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond0.AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(F32, {1, 1}),
cond0_dot, {0, 0}, {1, 1}, {1, 1})))),
builder_cond0.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond0.AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(F32, {1, 1}),
cond0_dot, {1, 1}, {2, 2}, {1, 1})))),
ComparisonDirection::kGt));
auto cond0 = module->AddEmbeddedComputation(builder_cond0.Build());
auto builder_cond1 = HloComputation::Builder("cond1");
auto cond1_param = builder_cond1.AddInstruction(
HloInstruction::CreateParameter(0, tuple1->shape(), "cond1_param"));
auto cond1_lhs = builder_cond1.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, cond1_param, 0));
auto cond1_rhs = builder_cond1.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, cond1_param, 1));
auto cond1_add_lhs =
builder_cond1.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, cond1_lhs, cond1_lhs));
auto cond1_dot =
builder_cond1.AddInstruction(CreateDot(shape, cond1_add_lhs, cond1_rhs));
builder_cond1.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}),
builder_cond1.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond1.AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(F32, {1, 1}),
cond1_dot, {0, 0}, {1, 1}, {1, 1})))),
builder_cond1.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {}),
builder_cond1.AddInstruction(
HloInstruction::CreateSlice(ShapeUtil::MakeShape(F32, {1, 1}),
cond1_dot, {1, 1}, {2, 2}, {1, 1})))),
ComparisonDirection::kGt));
auto cond1 = module->AddEmbeddedComputation(builder_cond1.Build());
auto builder_body = HloComputation::Builder("body");
auto body_param = builder_body.AddInstruction(
HloInstruction::CreateParameter(0, tuple0->shape(), "body_param"));
auto body_lhs = builder_body.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, body_param, 0));
auto body_rhs = builder_body.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, body_param, 1));
auto body_dot =
builder_body.AddInstruction(CreateDot(shape, body_lhs, body_rhs));
builder_body.AddInstruction(
HloInstruction::CreateTuple({body_dot, body_rhs}));
auto body = module->AddEmbeddedComputation(builder_body.Build());
auto while0 = builder.AddInstruction(
HloInstruction::CreateWhile(tuple0->shape(), cond0, body, tuple0));
auto while1 = builder.AddInstruction(
HloInstruction::CreateWhile(tuple1->shape(), cond1, body, tuple1));
auto lhs = builder.AddInstruction(
CreateDot(shape,
builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, while0, 0)),
builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, while0, 1))));
auto rhs = builder.AddInstruction(
CreateDot(shape,
builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, while1, 0)),
builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, while1, 1))));
auto dot = builder.AddInstruction(CreateDot(shape, lhs, rhs));
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_FALSE(OutputsBF16(body_dot));
EXPECT_FALSE(OutputsBF16(body_rhs));
EXPECT_FALSE(OutputsBF16(body_lhs));
EXPECT_FALSE(OutputsBF16(cond0_lhs));
EXPECT_FALSE(OutputsBF16(cond0_rhs));
EXPECT_FALSE(OutputsBF16(cond1_lhs));
EXPECT_FALSE(OutputsBF16(cond1_rhs));
EXPECT_TRUE(OutputsBF16(cond0_add_rhs));
EXPECT_TRUE(OutputsBF16(cond1_add_lhs));
EXPECT_EQ(computation->root_instruction(), dot);
}
TEST_F(BFloat16PropagationTest, NoopConversionRemoved) {
auto builder = HloComputation::Builder(TestName());
Shape f32_shape = ShapeUtil::MakeShape(F32, {4, 4});
Shape bf16_shape = ShapeUtil::MakeShape(BF16, {4, 4});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32_shape, "param"));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_shape, HloOpcode::kAdd, param, param));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(f32_shape, HloOpcode::kAdd, param, param));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({add0, add1}));
HloInstruction* gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32_shape, tuple, 0));
HloInstruction* gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32_shape, tuple, 1));
HloInstruction* convert0 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, gte0));
HloInstruction* convert1 =
builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, gte1));
HloInstruction* add2 = builder.AddInstruction(HloInstruction::CreateBinary(
bf16_shape, HloOpcode::kAdd, convert0, convert1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), add2);
EXPECT_EQ(add2->operand(0), add0);
EXPECT_EQ(add2->operand(1), add1);
EXPECT_EQ(add0->shape().element_type(), BF16);
EXPECT_EQ(add1->shape().element_type(), BF16);
}
TEST_F(BFloat16PropagationTest, TupleDomain) {
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* a =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a"));
HloInstruction* b =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "b"));
HloInstruction* a_trans =
builder.AddInstruction(HloInstruction::CreateTranspose(shape, a, {0, 1}));
HloInstruction* b_trans =
builder.AddInstruction(HloInstruction::CreateTranspose(shape, b, {0, 1}));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({a_trans, b_trans}));
HloInstruction* domain = builder.AddInstruction(
HloInstruction::CreateDomain(tuple->shape(), tuple, nullptr, nullptr));
HloInstruction* a_gte = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, domain, 0));
HloInstruction* b_gte = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, domain, 1));
HloInstruction* dot = builder.AddInstruction(CreateDot(shape, a_gte, b_gte));
HloInstruction* root = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, dot, dot));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), root);
EXPECT_EQ(ShapeUtil::GetTupleElementShape(domain->shape(), 0).element_type(),
BF16);
EXPECT_EQ(ShapeUtil::GetTupleElementShape(domain->shape(), 1).element_type(),
BF16);
EXPECT_TRUE(OutputsBF16(a_trans));
EXPECT_TRUE(OutputsBF16(b_trans));
EXPECT_TRUE(OutputsBF16(a_gte));
EXPECT_TRUE(OutputsBF16(b_gte));
EXPECT_FALSE(OutputsBF16(a));
EXPECT_FALSE(OutputsBF16(b));
}
TEST_F(BFloat16PropagationTest, TupleDomainNoPropagation) {
auto builder = HloComputation::Builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 4});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, shape});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "param"));
HloInstruction* domain = builder.AddInstruction(
HloInstruction::CreateDomain(param->shape(), param, nullptr, nullptr));
HloInstruction* a_gte = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, domain, 0));
HloInstruction* b_gte = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, domain, 1));
HloInstruction* a_trans = builder.AddInstruction(
HloInstruction::CreateTranspose(shape, a_gte, {0, 1}));
HloInstruction* b_trans = builder.AddInstruction(
HloInstruction::CreateTranspose(shape, b_gte, {0, 1}));
HloInstruction* dot =
builder.AddInstruction(CreateDot(shape, a_trans, b_trans));
HloInstruction* root = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, dot, dot));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(PropagatePrecision(module.get()));
EXPECT_EQ(computation->root_instruction(), root);
EXPECT_TRUE(OutputsBF16(a_trans));
EXPECT_TRUE(OutputsBF16(b_trans));
EXPECT_FALSE(OutputsBF16(a_gte));
EXPECT_FALSE(OutputsBF16(b_gte));
EXPECT_FALSE(OutputsBF16(domain));
EXPECT_FALSE(OutputsBF16(param));
}
TEST_F(BFloat16PropagationTest, ConditionalSeparateBranchOperands) {
const std::string module_str = R"(
HloModule module
true_branch {
true_param = f32[4096,4096] parameter(0)
ROOT max = f32[4096,4096] maximum(true_param, true_param)
}
false_branch {
false_param = f32[4096,4096] parameter(0)
ROOT add = f32[4096,4096] add(false_param, false_param)
}
ENTRY entry {
param0 = f32[4096,4096] parameter(0)
param1 = f32[4096,4096] parameter(1)
copy0 = f32[4096,4096] copy(param0)
copy1 = f32[4096,4096] copy(param1)
param2 = pred[] parameter(2)
conditional = f32[4096,4096] conditional(param2, copy0, copy1),
true_computation=true_branch, false_computation=false_branch
ROOT dot = f32[4096,4096] dot(conditional, conditional),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
EXPECT_TRUE(PropagatePrecision(module.get()));
auto cond = FindInstruction(module.get(), "conditional");
auto copy0 = FindInstruction(module.get(), "copy0");
auto copy1 = FindInstruction(module.get(), "copy1");
EXPECT_TRUE(OutputsBF16(cond));
EXPECT_TRUE(OutputsBF16(copy0));
EXPECT_FALSE(OutputsBF16(copy1));
}
TEST_F(BFloat16PropagationTest, ConditionalSharedBranchOperands) {
const std::string module_str = R"(
HloModule module
true_branch {
true_param = f32[4096,4096] parameter(0)
ROOT max = f32[4096,4096] maximum(true_param, true_param)
}
false_branch {
false_param = f32[4096,4096] parameter(0)
ROOT add = f32[4096,4096] add(false_param, false_param)
}
ENTRY entry {
param0 = f32[4096,4096] parameter(0)
copy0 = f32[4096,4096] copy(param0)
param1 = pred[] parameter(1)
conditional = f32[4096,4096] conditional(param1, copy0, copy0),
true_computation=true_branch, false_computation=false_branch
ROOT dot = f32[4096,4096] dot(conditional, conditional),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
EXPECT_TRUE(PropagatePrecision(module.get()));
auto cond = FindInstruction(module.get(), "conditional");
auto copy0 = FindInstruction(module.get(), "copy0");
EXPECT_TRUE(OutputsBF16(cond));
EXPECT_FALSE(OutputsBF16(copy0));
}
TEST_F(BFloat16PropagationTest, ConditionalAliasingOutputs) {
const std::string module_str = R"(
HloModule module
true_branch {
true_param = f32[4096,4096] parameter(0)
max = f32[4096,4096] maximum(true_param, true_param)
ROOT true_tuple = (f32[4096,4096], f32[4096,4096]) tuple(max, max)
}
false_branch {
false_param = f32[4096,4096] parameter(0)
min = f32[4096,4096] minimum(false_param, false_param)
max2 = f32[4096,4096] maximum(false_param, false_param)
ROOT false_tuple = (f32[4096,4096], f32[4096,4096]) tuple(min, max2)
}
ENTRY entry {
param0 = f32[4096,4096] parameter(0)
copy0 = f32[4096,4096] copy(param0)
param1 = pred[] parameter(1)
conditional = (f32[4096,4096], f32[4096,4096]) conditional(param1, copy0, copy0),
true_computation=true_branch, false_computation=false_branch
gte0 = f32[4096,4096] get-tuple-element(conditional), index=0
gte1 = f32[4096,4096] get-tuple-element(conditional), index=1
dot = f32[4096,4096] dot(gte0, gte1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT tuple = (f32[4096,4096], f32[4096,4096]) tuple(dot, gte1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
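  // In the true branch both tuple elements alias the same instruction, so
  // the two conditional outputs cannot take different precisions, yet gte1
  // must stay F32 for the root tuple while gte0 only feeds the dot; the
  // pass is therefore expected to make no changes.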
EXPECT_FALSE(PropagatePrecision(module.get()));
}
TEST_F(BFloat16PropagationTest, DynamicUpdateSlice) {
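  // The dynamic-update-slice feeds a dot, which would normally invite a
  // BF16 conversion, but the expectations below require the pass to leave
  // the module unchanged and keep the dynamic-update-slice output in F32.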
const std::string module_str = R"(
HloModule Module
ENTRY main {
param = f32[128,128] parameter(0)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
dynamic-update-slice = f32[128,128] dynamic-update-slice(param, broadcast.6, constant.3, constant.3)
ROOT dot = f32[128,128] dot(dynamic-update-slice, dynamic-update-slice), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
EXPECT_FALSE(PropagatePrecision(module.get()));
HloInstruction* dus = module->entry_computation()->GetInstructionWithName(
"dynamic-update-slice");
EXPECT_FALSE(OutputsBF16(dus));
}
TEST_F(BFloat16PropagationTest, ConditionalGTEWithFusion) {
const std::string module_str = R"(
HloModule module
%add.0 (x: f32[4096,4096], y: f32[4096,4096]) -> f32[4096,4096] {
x.1 = f32[4096,4096] parameter(0)
y.1 = f32[4096,4096] parameter(1)
ROOT dot1 = f32[4096,4096] dot(x.1, y.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
%add.1 (x: f32[4096,4096], y: f32[4096,4096]) -> f32[4096,4096] {
x.1 = f32[4096,4096] parameter(0)
y.1 = f32[4096,4096] parameter(1)
ROOT dot1 = f32[4096,4096] dot(x.1, y.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
%add.2 (x: f32[4096,4096], y: f32[4096,4096]) -> f32[4096,4096] {
x.1 = f32[4096,4096] parameter(0)
y.1 = f32[4096,4096] parameter(1)
ROOT dot1 = f32[4096,4096] dot(x.1, y.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
%add.3 (x: f32[4096,4096], y: f32[4096,4096]) -> f32[4096,4096] {
x.1 = f32[4096,4096] parameter(0)
y.1 = f32[4096,4096] parameter(1)
ROOT dot1 = f32[4096,4096] dot(x.1, y.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
true_branch {
true_param = f32[4096,4096] parameter(0)
constant.1 = f32[4096,4096] constant(0)
add0 = f32[4096,4096] fusion(true_param,true_param), kind=kLoop, calls=add.0
constant.2 = f32[4096,4096] constant(0)
ROOT tuple.2 = (f32[4096,4096], f32[4096,4096], f32[4096,4096]) tuple(true_param,add0,constant.2)
}
false_branch {
false_param = f32[4096,4096] parameter(0)
add3 = f32[4096,4096] fusion(false_param,false_param), kind=kLoop, calls=add.1
constant.1 = f32[4096,4096] constant(0)
ROOT tuple.2 = (f32[4096,4096], f32[4096,4096], f32[4096,4096]) tuple(add3, add3,constant.1)
}
ENTRY entry {
param0 = f32[4096,4096] parameter(0)
copy0 = f32[4096,4096] copy(param0)
param1 = pred[] parameter(1)
conditional = (f32[4096,4096], f32[4096,4096], f32[4096,4096]) conditional(param1, param0, copy0),
true_computation=true_branch, false_computation=false_branch
gte = f32[4096,4096] get-tuple-element(conditional), index=0
gte1 = f32[4096,4096] get-tuple-element(conditional), index=1
gte2 = f32[4096,4096] get-tuple-element(conditional), index=2
add2 = f32[4096,4096] fusion(gte, gte1), kind=kLoop, calls=add.2
ROOT add3 = f32[4096,4096] fusion(add2, gte2), kind=kLoop, calls=add.3
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
EXPECT_TRUE(PropagatePrecision(module.get()));
VLOG(2) << module->ToString() << "\n";
  EXPECT_TRUE(HloVerifier(/*layout_sensitive=*/false,
                          /*allow_mixed_precision=*/true)
                  .Run(module.get())
                  .status()
                  .ok());
auto gte = FindInstruction(module.get(), "gte");
auto gte1 = FindInstruction(module.get(), "gte1");
auto gte2 = FindInstruction(module.get(), "gte2");
EXPECT_FALSE(OutputsBF16(gte));
EXPECT_FALSE(OutputsBF16(gte1));
EXPECT_TRUE(OutputsBF16(gte2));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/bfloat16_propagation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/bfloat16_propagation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4f1cde24-9bd1-4c35-b0a6-b5d8de9285af | cpp | abseil/abseil-cpp | arg | absl/strings/internal/str_format/arg.cc | absl/strings/internal/str_format/arg_test.cc | #include "absl/strings/internal/str_format/arg.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <cwchar>
#include <string>
#include <type_traits>
#include "absl/base/config.h"
#include "absl/base/optimization.h"
#include "absl/container/fixed_array.h"
#include "absl/numeric/int128.h"
#include "absl/strings/internal/str_format/extension.h"
#include "absl/strings/internal/str_format/float_conversion.h"
#include "absl/strings/numbers.h"
#include "absl/strings/string_view.h"
#if defined(ABSL_HAVE_STD_STRING_VIEW)
#include <string_view>
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace str_format_internal {
namespace {
void ReducePadding(string_view s, size_t *capacity) {
*capacity = Excess(s.size(), *capacity);
}
void ReducePadding(size_t n, size_t *capacity) {
*capacity = Excess(n, *capacity);
}
template <typename T>
struct MakeUnsigned : std::make_unsigned<T> {};
template <>
struct MakeUnsigned<absl::int128> {
using type = absl::uint128;
};
template <>
struct MakeUnsigned<absl::uint128> {
using type = absl::uint128;
};
template <typename T>
struct IsSigned : std::is_signed<T> {};
template <>
struct IsSigned<absl::int128> : std::true_type {};
template <>
struct IsSigned<absl::uint128> : std::false_type {};
class IntDigits {
public:
template <typename T>
void PrintAsOct(T v) {
static_assert(!IsSigned<T>::value, "");
char *p = storage_ + sizeof(storage_);
do {
*--p = static_cast<char>('0' + (static_cast<size_t>(v) & 7));
v >>= 3;
} while (v);
start_ = p;
size_ = static_cast<size_t>(storage_ + sizeof(storage_) - p);
}
template <typename T>
void PrintAsDec(T v) {
static_assert(std::is_integral<T>::value, "");
start_ = storage_;
size_ = static_cast<size_t>(numbers_internal::FastIntToBuffer(v, storage_) -
storage_);
}
void PrintAsDec(int128 v) {
auto u = static_cast<uint128>(v);
bool add_neg = false;
if (v < 0) {
add_neg = true;
u = uint128{} - u;
}
PrintAsDec(u, add_neg);
}
void PrintAsDec(uint128 v, bool add_neg = false) {
char *p = storage_ + sizeof(storage_);
do {
p -= 2;
numbers_internal::PutTwoDigits(static_cast<uint32_t>(v % 100), p);
v /= 100;
} while (v);
if (p[0] == '0') {
++p;
}
if (add_neg) {
*--p = '-';
}
size_ = static_cast<size_t>(storage_ + sizeof(storage_) - p);
start_ = p;
}
template <typename T>
void PrintAsHexLower(T v) {
static_assert(!IsSigned<T>::value, "");
char *p = storage_ + sizeof(storage_);
do {
p -= 2;
constexpr const char* table = numbers_internal::kHexTable;
std::memcpy(p, table + 2 * (static_cast<size_t>(v) & 0xFF), 2);
if (sizeof(T) == 1) break;
v >>= 8;
} while (v);
if (p[0] == '0') {
++p;
}
start_ = p;
size_ = static_cast<size_t>(storage_ + sizeof(storage_) - p);
}
template <typename T>
void PrintAsHexUpper(T v) {
static_assert(!IsSigned<T>::value, "");
char *p = storage_ + sizeof(storage_);
do {
*--p = "0123456789ABCDEF"[static_cast<size_t>(v) & 15];
v >>= 4;
} while (v);
start_ = p;
size_ = static_cast<size_t>(storage_ + sizeof(storage_) - p);
}
string_view with_neg_and_zero() const { return {start_, size_}; }
string_view without_neg_or_zero() const {
static_assert('-' < '0', "The check below verifies both.");
size_t advance = start_[0] <= '0' ? 1 : 0;
return {start_ + advance, size_ - advance};
}
bool is_negative() const { return start_[0] == '-'; }
private:
const char *start_;
size_t size_;
char storage_[128 / 3 + 1 + 1];
};
string_view BaseIndicator(const IntDigits &as_digits,
const FormatConversionSpecImpl conv) {
bool alt = conv.has_alt_flag() ||
conv.conversion_char() == FormatConversionCharInternal::p;
bool hex = (conv.conversion_char() == FormatConversionCharInternal::x ||
conv.conversion_char() == FormatConversionCharInternal::X ||
conv.conversion_char() == FormatConversionCharInternal::p);
if (alt && hex && !as_digits.without_neg_or_zero().empty()) {
return conv.conversion_char() == FormatConversionCharInternal::X ? "0X"
: "0x";
}
return {};
}
string_view SignColumn(bool neg, const FormatConversionSpecImpl conv) {
if (conv.conversion_char() == FormatConversionCharInternal::d ||
conv.conversion_char() == FormatConversionCharInternal::i) {
if (neg) return "-";
if (conv.has_show_pos_flag()) return "+";
if (conv.has_sign_col_flag()) return " ";
}
return {};
}
bool ConvertCharImpl(char v,
const FormatConversionSpecImpl conv,
FormatSinkImpl* sink) {
size_t fill = 0;
if (conv.width() >= 0)
fill = static_cast<size_t>(conv.width());
ReducePadding(1, &fill);
if (!conv.has_left_flag()) sink->Append(fill, ' ');
sink->Append(1, v);
if (conv.has_left_flag()) sink->Append(fill, ' ');
return true;
}
bool ConvertIntImplInnerSlow(const IntDigits &as_digits,
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
size_t fill = 0;
if (conv.width() >= 0)
fill = static_cast<size_t>(conv.width());
string_view formatted = as_digits.without_neg_or_zero();
ReducePadding(formatted, &fill);
string_view sign = SignColumn(as_digits.is_negative(), conv);
ReducePadding(sign, &fill);
string_view base_indicator = BaseIndicator(as_digits, conv);
ReducePadding(base_indicator, &fill);
bool precision_specified = conv.precision() >= 0;
size_t precision =
precision_specified ? static_cast<size_t>(conv.precision()) : size_t{1};
if (conv.has_alt_flag() &&
conv.conversion_char() == FormatConversionCharInternal::o) {
if (formatted.empty() || *formatted.begin() != '0') {
size_t needed = formatted.size() + 1;
precision = std::max(precision, needed);
}
}
size_t num_zeroes = Excess(formatted.size(), precision);
ReducePadding(num_zeroes, &fill);
size_t num_left_spaces = !conv.has_left_flag() ? fill : 0;
size_t num_right_spaces = conv.has_left_flag() ? fill : 0;
if (!precision_specified && conv.has_zero_flag()) {
num_zeroes += num_left_spaces;
num_left_spaces = 0;
}
sink->Append(num_left_spaces, ' ');
sink->Append(sign);
sink->Append(base_indicator);
sink->Append(num_zeroes, '0');
sink->Append(formatted);
sink->Append(num_right_spaces, ' ');
return true;
}
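// ConvertIntImplInnerSlow emits the pieces in the order: left spaces, sign,
// base indicator, zero padding, digits, right spaces. Zero padding
// therefore lands between the prefix and the digits; for example, 255
// formatted with "%#010x" comes out as "0x000000ff".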
template <typename T>
bool ConvertFloatArg(T v, FormatConversionSpecImpl conv, FormatSinkImpl *sink) {
if (conv.conversion_char() == FormatConversionCharInternal::v) {
conv.set_conversion_char(FormatConversionCharInternal::g);
}
return FormatConversionCharIsFloat(conv.conversion_char()) &&
ConvertFloatImpl(v, conv, sink);
}
inline bool ConvertStringArg(string_view v, const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
if (conv.is_basic()) {
sink->Append(v);
return true;
}
return sink->PutPaddedString(v, conv.width(), conv.precision(),
conv.has_left_flag());
}
struct ShiftState {
bool saw_high_surrogate = false;
uint8_t bits = 0;
};
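// Encodes one wchar_t as UTF-8. On platforms where wchar_t is 16 bits, a
// supplementary character arrives as a surrogate pair split across two
// calls: the high surrogate emits the first two bytes of the 4-byte
// sequence and parks its two low bits in ShiftState, and the matching low
// surrogate emits the remaining two continuation bytes. Returns the number
// of bytes written, or size_t(-1) on an invalid or unpaired input.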
inline size_t WideToUtf8(wchar_t wc, char *buf, ShiftState &s) {
const auto v = static_cast<uint32_t>(wc);
if (v < 0x80) {
*buf = static_cast<char>(v);
return 1;
} else if (v < 0x800) {
*buf++ = static_cast<char>(0xc0 | (v >> 6));
*buf = static_cast<char>(0x80 | (v & 0x3f));
return 2;
} else if (v < 0xd800 || (v - 0xe000) < 0x2000) {
*buf++ = static_cast<char>(0xe0 | (v >> 12));
*buf++ = static_cast<char>(0x80 | ((v >> 6) & 0x3f));
*buf = static_cast<char>(0x80 | (v & 0x3f));
return 3;
} else if ((v - 0x10000) < 0x100000) {
*buf++ = static_cast<char>(0xf0 | (v >> 18));
*buf++ = static_cast<char>(0x80 | ((v >> 12) & 0x3f));
*buf++ = static_cast<char>(0x80 | ((v >> 6) & 0x3f));
*buf = static_cast<char>(0x80 | (v & 0x3f));
return 4;
} else if (v < 0xdc00) {
s.saw_high_surrogate = true;
s.bits = static_cast<uint8_t>(v & 0x3);
const uint8_t high_bits = ((v >> 6) & 0xf) + 1;
*buf++ = static_cast<char>(0xf0 | (high_bits >> 2));
*buf =
static_cast<char>(0x80 | static_cast<uint8_t>((high_bits & 0x3) << 4) |
static_cast<uint8_t>((v >> 2) & 0xf));
return 2;
} else if (v < 0xe000 && s.saw_high_surrogate) {
*buf++ = static_cast<char>(0x80 | static_cast<uint8_t>(s.bits << 4) |
static_cast<uint8_t>((v >> 6) & 0xf));
*buf = static_cast<char>(0x80 | (v & 0x3f));
s.saw_high_surrogate = false;
s.bits = 0;
return 2;
} else {
return static_cast<size_t>(-1);
}
}
inline bool ConvertStringArg(const wchar_t *v,
size_t len,
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
FixedArray<char> mb(len * 4);
ShiftState s;
size_t chars_written = 0;
for (size_t i = 0; i < len; ++i) {
const size_t chars = WideToUtf8(v[i], &mb[chars_written], s);
if (chars == static_cast<size_t>(-1)) { return false; }
chars_written += chars;
}
return ConvertStringArg(string_view(mb.data(), chars_written), conv, sink);
}
bool ConvertWCharTImpl(wchar_t v, const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
char mb[4];
ShiftState s;
const size_t chars_written = WideToUtf8(v, mb, s);
return chars_written != static_cast<size_t>(-1) && !s.saw_high_surrogate &&
ConvertStringArg(string_view(mb, chars_written), conv, sink);
}
}
bool ConvertBoolArg(bool v, FormatSinkImpl *sink) {
if (v) {
sink->Append("true");
} else {
sink->Append("false");
}
return true;
}
template <typename T>
bool ConvertIntArg(T v, FormatConversionSpecImpl conv, FormatSinkImpl *sink) {
using U = typename MakeUnsigned<T>::type;
IntDigits as_digits;
switch (static_cast<uint8_t>(conv.conversion_char())) {
case static_cast<uint8_t>(FormatConversionCharInternal::c):
return (std::is_same<T, wchar_t>::value ||
(conv.length_mod() == LengthMod::l))
? ConvertWCharTImpl(static_cast<wchar_t>(v), conv, sink)
: ConvertCharImpl(static_cast<char>(v), conv, sink);
case static_cast<uint8_t>(FormatConversionCharInternal::o):
as_digits.PrintAsOct(static_cast<U>(v));
break;
case static_cast<uint8_t>(FormatConversionCharInternal::x):
as_digits.PrintAsHexLower(static_cast<U>(v));
break;
case static_cast<uint8_t>(FormatConversionCharInternal::X):
as_digits.PrintAsHexUpper(static_cast<U>(v));
break;
case static_cast<uint8_t>(FormatConversionCharInternal::u):
as_digits.PrintAsDec(static_cast<U>(v));
break;
case static_cast<uint8_t>(FormatConversionCharInternal::d):
case static_cast<uint8_t>(FormatConversionCharInternal::i):
case static_cast<uint8_t>(FormatConversionCharInternal::v):
as_digits.PrintAsDec(v);
break;
case static_cast<uint8_t>(FormatConversionCharInternal::a):
case static_cast<uint8_t>(FormatConversionCharInternal::e):
case static_cast<uint8_t>(FormatConversionCharInternal::f):
case static_cast<uint8_t>(FormatConversionCharInternal::g):
case static_cast<uint8_t>(FormatConversionCharInternal::A):
case static_cast<uint8_t>(FormatConversionCharInternal::E):
case static_cast<uint8_t>(FormatConversionCharInternal::F):
case static_cast<uint8_t>(FormatConversionCharInternal::G):
return ConvertFloatImpl(static_cast<double>(v), conv, sink);
default:
ABSL_ASSUME(false);
}
if (conv.is_basic()) {
sink->Append(as_digits.with_neg_and_zero());
return true;
}
return ConvertIntImplInnerSlow(as_digits, conv, sink);
}
template bool ConvertIntArg<char>(char v, FormatConversionSpecImpl conv,
FormatSinkImpl *sink);
template bool ConvertIntArg<signed char>(signed char v,
FormatConversionSpecImpl conv,
FormatSinkImpl *sink);
template bool ConvertIntArg<unsigned char>(unsigned char v,
FormatConversionSpecImpl conv,
FormatSinkImpl *sink);
template bool ConvertIntArg<wchar_t>(wchar_t v, FormatConversionSpecImpl conv,
FormatSinkImpl *sink);
template bool ConvertIntArg<short>(short v,
FormatConversionSpecImpl conv,
FormatSinkImpl *sink);
template bool ConvertIntArg<unsigned short>(unsigned short v,
FormatConversionSpecImpl conv,
FormatSinkImpl *sink);
template bool ConvertIntArg<int>(int v, FormatConversionSpecImpl conv,
FormatSinkImpl *sink);
template bool ConvertIntArg<unsigned int>(unsigned int v,
FormatConversionSpecImpl conv,
FormatSinkImpl *sink);
template bool ConvertIntArg<long>(long v,
FormatConversionSpecImpl conv,
FormatSinkImpl *sink);
template bool ConvertIntArg<unsigned long>(unsigned long v,
FormatConversionSpecImpl conv,
FormatSinkImpl *sink);
template bool ConvertIntArg<long long>(long long v,
FormatConversionSpecImpl conv,
FormatSinkImpl *sink);
template bool ConvertIntArg<unsigned long long>(unsigned long long v,
FormatConversionSpecImpl conv,
FormatSinkImpl *sink);
StringConvertResult FormatConvertImpl(const std::string &v,
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
return {ConvertStringArg(v, conv, sink)};
}
StringConvertResult FormatConvertImpl(const std::wstring &v,
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
return {ConvertStringArg(v.data(), v.size(), conv, sink)};
}
StringConvertResult FormatConvertImpl(string_view v,
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
return {ConvertStringArg(v, conv, sink)};
}
#if defined(ABSL_HAVE_STD_STRING_VIEW)
StringConvertResult FormatConvertImpl(std::wstring_view v,
const FormatConversionSpecImpl conv,
FormatSinkImpl* sink) {
return {ConvertStringArg(v.data(), v.size(), conv, sink)};
}
#endif
StringPtrConvertResult FormatConvertImpl(const char* v,
const FormatConversionSpecImpl conv,
FormatSinkImpl* sink) {
if (conv.conversion_char() == FormatConversionCharInternal::p)
return {FormatConvertImpl(VoidPtr(v), conv, sink).value};
size_t len;
if (v == nullptr) {
len = 0;
} else if (conv.precision() < 0) {
len = std::strlen(v);
} else {
len = static_cast<size_t>(std::find(v, v + conv.precision(), '\0') - v);
}
return {ConvertStringArg(string_view(v, len), conv, sink)};
}
StringPtrConvertResult FormatConvertImpl(const wchar_t* v,
const FormatConversionSpecImpl conv,
FormatSinkImpl* sink) {
if (conv.conversion_char() == FormatConversionCharInternal::p) {
return {FormatConvertImpl(VoidPtr(v), conv, sink).value};
}
size_t len;
if (v == nullptr) {
len = 0;
} else if (conv.precision() < 0) {
len = std::wcslen(v);
} else {
len = static_cast<size_t>(std::find(v, v + conv.precision(), L'\0') - v);
}
return {ConvertStringArg(v, len, conv, sink)};
}
StringPtrConvertResult FormatConvertImpl(std::nullptr_t,
const FormatConversionSpecImpl conv,
FormatSinkImpl* sink) {
return FormatConvertImpl(static_cast<const char*>(nullptr), conv, sink);
}
ArgConvertResult<FormatConversionCharSetInternal::p> FormatConvertImpl(
VoidPtr v, const FormatConversionSpecImpl conv, FormatSinkImpl *sink) {
if (!v.value) {
sink->Append("(nil)");
return {true};
}
IntDigits as_digits;
as_digits.PrintAsHexLower(v.value);
return {ConvertIntImplInnerSlow(as_digits, conv, sink)};
}
FloatingConvertResult FormatConvertImpl(float v,
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
return {ConvertFloatArg(v, conv, sink)};
}
FloatingConvertResult FormatConvertImpl(double v,
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
return {ConvertFloatArg(v, conv, sink)};
}
FloatingConvertResult FormatConvertImpl(long double v,
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
return {ConvertFloatArg(v, conv, sink)};
}
CharConvertResult FormatConvertImpl(char v, const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
return {ConvertIntArg(v, conv, sink)};
}
CharConvertResult FormatConvertImpl(wchar_t v,
const FormatConversionSpecImpl conv,
FormatSinkImpl* sink) {
return {ConvertIntArg(v, conv, sink)};
}
IntegralConvertResult FormatConvertImpl(signed char v,
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
return {ConvertIntArg(v, conv, sink)};
}
IntegralConvertResult FormatConvertImpl(unsigned char v,
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
return {ConvertIntArg(v, conv, sink)};
}
IntegralConvertResult FormatConvertImpl(short v,
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
return {ConvertIntArg(v, conv, sink)};
}
IntegralConvertResult FormatConvertImpl(unsigned short v,
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
return {ConvertIntArg(v, conv, sink)};
}
IntegralConvertResult FormatConvertImpl(int v,
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
return {ConvertIntArg(v, conv, sink)};
}
IntegralConvertResult FormatConvertImpl(unsigned v,
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
return {ConvertIntArg(v, conv, sink)};
}
IntegralConvertResult FormatConvertImpl(long v,
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
return {ConvertIntArg(v, conv, sink)};
}
IntegralConvertResult FormatConvertImpl(unsigned long v,
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
return {ConvertIntArg(v, conv, sink)};
}
IntegralConvertResult FormatConvertImpl(long long v,
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
return {ConvertIntArg(v, conv, sink)};
}
IntegralConvertResult FormatConvertImpl(unsigned long long v,
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
return {ConvertIntArg(v, conv, sink)};
}
IntegralConvertResult FormatConvertImpl(absl::int128 v,
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
return {ConvertIntArg(v, conv, sink)};
}
IntegralConvertResult FormatConvertImpl(absl::uint128 v,
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
return {ConvertIntArg(v, conv, sink)};
}
ABSL_INTERNAL_FORMAT_DISPATCH_OVERLOADS_EXPAND_();
}
ABSL_NAMESPACE_END
} | #include "absl/strings/internal/str_format/arg.h"
#include <limits>
#include <string>
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/strings/str_format.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace str_format_internal {
namespace {
class FormatArgImplTest : public ::testing::Test {
public:
enum Color { kRed, kGreen, kBlue };
static const char *hi() { return "hi"; }
struct X {};
X x_;
};
inline FormatConvertResult<FormatConversionCharSet{}> AbslFormatConvert(
const FormatArgImplTest::X &, const FormatConversionSpec &, FormatSink *) {
return {false};
}
TEST_F(FormatArgImplTest, ToInt) {
int out = 0;
EXPECT_TRUE(FormatArgImplFriend::ToInt(FormatArgImpl(1), &out));
EXPECT_EQ(1, out);
EXPECT_TRUE(FormatArgImplFriend::ToInt(FormatArgImpl(-1), &out));
EXPECT_EQ(-1, out);
EXPECT_TRUE(
FormatArgImplFriend::ToInt(FormatArgImpl(static_cast<char>(64)), &out));
EXPECT_EQ(64, out);
EXPECT_TRUE(FormatArgImplFriend::ToInt(
FormatArgImpl(static_cast<unsigned long long>(123456)), &out));
EXPECT_EQ(123456, out);
EXPECT_TRUE(FormatArgImplFriend::ToInt(
FormatArgImpl(static_cast<unsigned long long>(
std::numeric_limits<int>::max()) +
1),
&out));
EXPECT_EQ(std::numeric_limits<int>::max(), out);
EXPECT_TRUE(FormatArgImplFriend::ToInt(
FormatArgImpl(static_cast<long long>(
std::numeric_limits<int>::min()) -
10),
&out));
EXPECT_EQ(std::numeric_limits<int>::min(), out);
EXPECT_TRUE(FormatArgImplFriend::ToInt(FormatArgImpl(false), &out));
EXPECT_EQ(0, out);
EXPECT_TRUE(FormatArgImplFriend::ToInt(FormatArgImpl(true), &out));
EXPECT_EQ(1, out);
EXPECT_FALSE(FormatArgImplFriend::ToInt(FormatArgImpl(2.2), &out));
EXPECT_FALSE(FormatArgImplFriend::ToInt(FormatArgImpl(3.2f), &out));
EXPECT_FALSE(FormatArgImplFriend::ToInt(
FormatArgImpl(static_cast<int *>(nullptr)), &out));
EXPECT_FALSE(FormatArgImplFriend::ToInt(FormatArgImpl(hi()), &out));
EXPECT_FALSE(FormatArgImplFriend::ToInt(FormatArgImpl("hi"), &out));
EXPECT_FALSE(FormatArgImplFriend::ToInt(FormatArgImpl(x_), &out));
EXPECT_TRUE(FormatArgImplFriend::ToInt(FormatArgImpl(kBlue), &out));
EXPECT_EQ(2, out);
}
extern const char kMyArray[];
TEST_F(FormatArgImplTest, CharArraysDecayToCharPtr) {
const char* a = "";
EXPECT_EQ(FormatArgImplFriend::GetVTablePtrForTest(FormatArgImpl(a)),
FormatArgImplFriend::GetVTablePtrForTest(FormatArgImpl("")));
EXPECT_EQ(FormatArgImplFriend::GetVTablePtrForTest(FormatArgImpl(a)),
FormatArgImplFriend::GetVTablePtrForTest(FormatArgImpl("A")));
EXPECT_EQ(FormatArgImplFriend::GetVTablePtrForTest(FormatArgImpl(a)),
FormatArgImplFriend::GetVTablePtrForTest(FormatArgImpl("ABC")));
EXPECT_EQ(FormatArgImplFriend::GetVTablePtrForTest(FormatArgImpl(a)),
FormatArgImplFriend::GetVTablePtrForTest(FormatArgImpl(kMyArray)));
}
extern const wchar_t kMyWCharTArray[];
TEST_F(FormatArgImplTest, WCharTArraysDecayToWCharTPtr) {
const wchar_t* a = L"";
EXPECT_EQ(FormatArgImplFriend::GetVTablePtrForTest(FormatArgImpl(a)),
FormatArgImplFriend::GetVTablePtrForTest(FormatArgImpl(L"")));
EXPECT_EQ(FormatArgImplFriend::GetVTablePtrForTest(FormatArgImpl(a)),
FormatArgImplFriend::GetVTablePtrForTest(FormatArgImpl(L"A")));
EXPECT_EQ(FormatArgImplFriend::GetVTablePtrForTest(FormatArgImpl(a)),
FormatArgImplFriend::GetVTablePtrForTest(FormatArgImpl(L"ABC")));
EXPECT_EQ(
FormatArgImplFriend::GetVTablePtrForTest(FormatArgImpl(a)),
FormatArgImplFriend::GetVTablePtrForTest(FormatArgImpl(kMyWCharTArray)));
}
TEST_F(FormatArgImplTest, OtherPtrDecayToVoidPtr) {
auto expected = FormatArgImplFriend::GetVTablePtrForTest(
FormatArgImpl(static_cast<void *>(nullptr)));
EXPECT_EQ(FormatArgImplFriend::GetVTablePtrForTest(
FormatArgImpl(static_cast<int *>(nullptr))),
expected);
EXPECT_EQ(FormatArgImplFriend::GetVTablePtrForTest(
FormatArgImpl(static_cast<volatile int *>(nullptr))),
expected);
auto p = static_cast<void (*)()>([] {});
EXPECT_EQ(FormatArgImplFriend::GetVTablePtrForTest(FormatArgImpl(p)),
expected);
}
TEST_F(FormatArgImplTest, WorksWithCharArraysOfUnknownSize) {
std::string s;
FormatSinkImpl sink(&s);
FormatConversionSpecImpl conv;
FormatConversionSpecImplFriend::SetConversionChar(
FormatConversionCharInternal::s, &conv);
FormatConversionSpecImplFriend::SetFlags(Flags(), &conv);
FormatConversionSpecImplFriend::SetWidth(-1, &conv);
FormatConversionSpecImplFriend::SetPrecision(-1, &conv);
EXPECT_TRUE(
FormatArgImplFriend::Convert(FormatArgImpl(kMyArray), conv, &sink));
sink.Flush();
EXPECT_EQ("ABCDE", s);
}
const char kMyArray[] = "ABCDE";
TEST_F(FormatArgImplTest, WorksWithWCharTArraysOfUnknownSize) {
std::string s;
FormatSinkImpl sink(&s);
FormatConversionSpecImpl conv;
FormatConversionSpecImplFriend::SetConversionChar(
FormatConversionCharInternal::s, &conv);
FormatConversionSpecImplFriend::SetFlags(Flags(), &conv);
FormatConversionSpecImplFriend::SetWidth(-1, &conv);
FormatConversionSpecImplFriend::SetPrecision(-1, &conv);
EXPECT_TRUE(
FormatArgImplFriend::Convert(FormatArgImpl(kMyWCharTArray), conv, &sink));
sink.Flush();
EXPECT_EQ("ABCDE", s);
}
const wchar_t kMyWCharTArray[] = L"ABCDE";
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/str_format/arg.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/str_format/arg_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
2801fa6d-a374-41d2-8d79-f378f788147e | cpp | google/arolla | bitwise | arolla/qexpr/operators/bitwise/bitwise.h | arolla/qexpr/operators/bitwise/bitwise_test.cc | #ifndef AROLLA_QEXPR_OPERATORS_BITWISE_BITWISE_H_
#define AROLLA_QEXPR_OPERATORS_BITWISE_BITWISE_H_
#include <type_traits>
namespace arolla {
struct BitwiseAndOp {
using run_on_missing = std::true_type;
template <typename T>
T operator()(T lhs, T rhs) const {
return lhs & rhs;
}
};
struct BitwiseOrOp {
using run_on_missing = std::true_type;
template <typename T>
T operator()(T lhs, T rhs) const {
return lhs | rhs;
}
};
struct BitwiseXorOp {
using run_on_missing = std::true_type;
template <typename T>
T operator()(T lhs, T rhs) const {
return lhs ^ rhs;
}
};
struct InvertOp {
using run_on_missing = std::true_type;
template <typename T>
T operator()(T x) const {
return ~x;
}
};
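// The functors above are stateless and can be invoked directly, e.g.
// BitwiseAndOp{}(int32_t{5}, int32_t{17}) == 1 and
// InvertOp{}(int32_t{5}) == -6; the unit tests exercise them through the
// registered operator names "bitwise.bitwise_and", "bitwise.bitwise_or",
// "bitwise.bitwise_xor", and "bitwise.invert".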
}
#endif | #include <cstdint>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status_matchers.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qtype/base_types.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
TEST(BitwiseOperatorsTest, BitwiseAnd) {
EXPECT_THAT(
InvokeOperator<int32_t>("bitwise.bitwise_and", int32_t{5}, int32_t{17}),
IsOkAndHolds(1));
EXPECT_THAT(
InvokeOperator<int64_t>("bitwise.bitwise_and", int64_t{5}, int64_t{17}),
IsOkAndHolds(1));
EXPECT_THAT(
InvokeOperator<int32_t>("bitwise.bitwise_and", int32_t{-2}, int32_t{17}),
IsOkAndHolds(16));
EXPECT_THAT(
InvokeOperator<int32_t>("bitwise.bitwise_and", int32_t{-2}, int32_t{-2}),
IsOkAndHolds(-2));
}
TEST(BitwiseOperatorsTest, BitwiseOr) {
EXPECT_THAT(
InvokeOperator<int32_t>("bitwise.bitwise_or", int32_t{5}, int32_t{17}),
IsOkAndHolds(21));
EXPECT_THAT(
InvokeOperator<int64_t>("bitwise.bitwise_or", int64_t{5}, int64_t{17}),
IsOkAndHolds(21));
EXPECT_THAT(
InvokeOperator<int32_t>("bitwise.bitwise_or", int32_t{-2}, int32_t{17}),
IsOkAndHolds(-1));
EXPECT_THAT(
InvokeOperator<int32_t>("bitwise.bitwise_or", int32_t{-2}, int32_t{-2}),
IsOkAndHolds(-2));
}
TEST(BitwiseOperatorsTest, BitwiseXor) {
EXPECT_THAT(
InvokeOperator<int32_t>("bitwise.bitwise_xor", int32_t{5}, int32_t{17}),
IsOkAndHolds(20));
EXPECT_THAT(
InvokeOperator<int64_t>("bitwise.bitwise_xor", int64_t{5}, int64_t{17}),
IsOkAndHolds(20));
EXPECT_THAT(
InvokeOperator<int32_t>("bitwise.bitwise_xor", int32_t{-2}, int32_t{17}),
IsOkAndHolds(-17));
EXPECT_THAT(
InvokeOperator<int32_t>("bitwise.bitwise_xor", int32_t{-2}, int32_t{-2}),
IsOkAndHolds(0));
}
TEST(BitwiseOperatorsTest, Invert) {
EXPECT_THAT(InvokeOperator<int32_t>("bitwise.invert", int32_t{5}),
IsOkAndHolds(-6));
EXPECT_THAT(InvokeOperator<int64_t>("bitwise.invert", int64_t{5}),
IsOkAndHolds(-6));
EXPECT_THAT(InvokeOperator<int32_t>("bitwise.invert", int32_t{-2}),
IsOkAndHolds(1));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/bitwise/bitwise.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/bitwise/bitwise_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
28dc5ec3-deef-479b-9dd9-8be3d29407df | cpp | abseil/abseil-cpp | hash | absl/hash/internal/hash.cc | absl/hash/hash_test.cc | #include "absl/hash/internal/hash.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace hash_internal {
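// Both CombineLargeContiguousImpl overloads below consume the input in
// PiecewiseChunkSize() blocks, mixing each block's hash into the running
// state, and hand the remaining tail (shorter than one block) to
// CombineContiguousImpl.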
uint64_t MixingHashState::CombineLargeContiguousImpl32(
uint64_t state, const unsigned char* first, size_t len) {
while (len >= PiecewiseChunkSize()) {
state = Mix(state,
hash_internal::CityHash32(reinterpret_cast<const char*>(first),
PiecewiseChunkSize()));
len -= PiecewiseChunkSize();
first += PiecewiseChunkSize();
}
return CombineContiguousImpl(state, first, len,
std::integral_constant<int, 4>{});
}
uint64_t MixingHashState::CombineLargeContiguousImpl64(
uint64_t state, const unsigned char* first, size_t len) {
while (len >= PiecewiseChunkSize()) {
state = Mix(state, Hash64(first, PiecewiseChunkSize()));
len -= PiecewiseChunkSize();
first += PiecewiseChunkSize();
}
return CombineContiguousImpl(state, first, len,
std::integral_constant<int, 8>{});
}
ABSL_CONST_INIT const void* const MixingHashState::kSeed = &kSeed;
constexpr uint64_t kHashSalt[5] = {
uint64_t{0x243F6A8885A308D3}, uint64_t{0x13198A2E03707344},
uint64_t{0xA4093822299F31D0}, uint64_t{0x082EFA98EC4E6C89},
uint64_t{0x452821E638D01377},
};
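// These salt constants are the leading hexadecimal digits of pi (the same
// "nothing up my sleeve" values used to seed Blowfish's P-array): fixed,
// but chosen to have no obvious exploitable structure.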
uint64_t MixingHashState::LowLevelHashImpl(const unsigned char* data,
size_t len) {
return LowLevelHashLenGt16(data, len, Seed(), kHashSalt);
}
}
ABSL_NAMESPACE_END
} | #include "absl/hash/hash.h"
#include <algorithm>
#include <array>
#include <bitset>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <initializer_list>
#include <ios>
#include <limits>
#include <memory>
#include <ostream>
#include <set>
#include <string>
#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/container/flat_hash_set.h"
#include "absl/hash/hash_testing.h"
#include "absl/hash/internal/hash_test.h"
#include "absl/hash/internal/spy_hash_state.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/cord_test_helpers.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#ifdef ABSL_INTERNAL_STD_FILESYSTEM_PATH_HASH_AVAILABLE
#include <filesystem>
#endif
#ifdef ABSL_HAVE_STD_STRING_VIEW
#include <string_view>
#endif
namespace {
using ::absl::hash_test_internal::is_hashable;
using ::absl::hash_test_internal::TypeErasedContainer;
using ::absl::hash_test_internal::TypeErasedValue;
template <typename T>
using TypeErasedVector = TypeErasedContainer<std::vector<T>>;
using absl::Hash;
using absl::hash_internal::SpyHashState;
template <typename T>
class HashValueIntTest : public testing::Test {
};
TYPED_TEST_SUITE_P(HashValueIntTest);
template <typename T>
SpyHashState SpyHash(const T& value) {
return SpyHashState::combine(SpyHashState(), value);
}
TYPED_TEST_P(HashValueIntTest, BasicUsage) {
EXPECT_TRUE((is_hashable<TypeParam>::value));
TypeParam n = 42;
EXPECT_EQ(SpyHash(n), SpyHash(TypeParam{42}));
EXPECT_NE(SpyHash(n), SpyHash(TypeParam{0}));
EXPECT_NE(SpyHash(std::numeric_limits<TypeParam>::max()),
SpyHash(std::numeric_limits<TypeParam>::min()));
}
TYPED_TEST_P(HashValueIntTest, FastPath) {
TypeParam n = 42;
EXPECT_EQ(absl::Hash<TypeParam>{}(n),
absl::Hash<std::tuple<TypeParam>>{}(std::tuple<TypeParam>(n)));
}
REGISTER_TYPED_TEST_SUITE_P(HashValueIntTest, BasicUsage, FastPath);
using IntTypes = testing::Types<unsigned char, char, int, int32_t, int64_t,
uint32_t, uint64_t, size_t>;
INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueIntTest, IntTypes);
enum LegacyEnum { kValue1, kValue2, kValue3 };
enum class EnumClass { kValue4, kValue5, kValue6 };
TEST(HashValueTest, EnumAndBool) {
EXPECT_TRUE((is_hashable<LegacyEnum>::value));
EXPECT_TRUE((is_hashable<EnumClass>::value));
EXPECT_TRUE((is_hashable<bool>::value));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
LegacyEnum::kValue1, LegacyEnum::kValue2, LegacyEnum::kValue3)));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
EnumClass::kValue4, EnumClass::kValue5, EnumClass::kValue6)));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
std::make_tuple(true, false)));
}
TEST(HashValueTest, FloatingPoint) {
EXPECT_TRUE((is_hashable<float>::value));
EXPECT_TRUE((is_hashable<double>::value));
EXPECT_TRUE((is_hashable<long double>::value));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
std::make_tuple(42.f, 0.f, -0.f, std::numeric_limits<float>::infinity(),
-std::numeric_limits<float>::infinity())));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
std::make_tuple(42., 0., -0., std::numeric_limits<double>::infinity(),
-std::numeric_limits<double>::infinity())));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
.5L, 1.L, 2.L, 4.L, 42.L, 0.L, -0.L,
17 * static_cast<long double>(std::numeric_limits<double>::max()),
std::numeric_limits<long double>::infinity(),
-std::numeric_limits<long double>::infinity())));
}
TEST(HashValueTest, Pointer) {
EXPECT_TRUE((is_hashable<int*>::value));
EXPECT_TRUE((is_hashable<int(*)(char, float)>::value));
EXPECT_TRUE((is_hashable<void(*)(int, int, ...)>::value));
int i;
int* ptr = &i;
int* n = nullptr;
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
std::make_tuple(&i, ptr, nullptr, ptr + 1, n)));
}
TEST(HashValueTest, PointerAlignment) {
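  // Hashes kNumValues pointers at each alignment and accumulates the OR and
  // AND of the results: a bit that is 0 in bits_or was 0 in every hash, and
  // a bit that is 1 in bits_and was 1 in every hash. Either way that bit is
  // "stuck" and carries no entropy, which the check below rejects for the
  // low kMask bits.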
constexpr size_t kTotalSize = 1 << 20;
std::unique_ptr<char[]> data(new char[kTotalSize]);
constexpr size_t kLog2NumValues = 5;
constexpr size_t kNumValues = 1 << kLog2NumValues;
for (size_t align = 1; align < kTotalSize / kNumValues;
align < 8 ? align += 1 : align < 1024 ? align += 8 : align += 32) {
SCOPED_TRACE(align);
ASSERT_LE(align * kNumValues, kTotalSize);
size_t bits_or = 0;
size_t bits_and = ~size_t{};
for (size_t i = 0; i < kNumValues; ++i) {
size_t hash = absl::Hash<void*>()(data.get() + i * align);
bits_or |= hash;
bits_and &= hash;
}
constexpr size_t kMask = (1 << (kLog2NumValues + 7)) - 1;
size_t stuck_bits = (~bits_or | bits_and) & kMask;
EXPECT_EQ(stuck_bits, 0u) << "0x" << std::hex << stuck_bits;
}
}
TEST(HashValueTest, PointerToMember) {
struct Bass {
void q() {}
};
struct A : Bass {
virtual ~A() = default;
virtual void vfa() {}
static auto pq() -> void (A::*)() { return &A::q; }
};
struct B : Bass {
virtual ~B() = default;
virtual void vfb() {}
static auto pq() -> void (B::*)() { return &B::q; }
};
struct Foo : A, B {
void f1() {}
void f2() const {}
int g1() & { return 0; }
int g2() const & { return 0; }
int g3() && { return 0; }
int g4() const && { return 0; }
int h1() & { return 0; }
int h2() const & { return 0; }
int h3() && { return 0; }
int h4() const && { return 0; }
int a;
int b;
const int c = 11;
const int d = 22;
};
EXPECT_TRUE((is_hashable<float Foo::*>::value));
EXPECT_TRUE((is_hashable<double (Foo::*)(int, int)&&>::value));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
std::make_tuple(&Foo::a, &Foo::b, static_cast<int Foo::*>(nullptr))));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
std::make_tuple(&Foo::c, &Foo::d, static_cast<const int Foo::*>(nullptr),
&Foo::a, &Foo::b)));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
&Foo::f1, static_cast<void (Foo::*)()>(nullptr))));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
&Foo::f2, static_cast<void (Foo::*)() const>(nullptr))));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
&Foo::g1, &Foo::h1, static_cast<int (Foo::*)() &>(nullptr))));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
&Foo::g2, &Foo::h2, static_cast<int (Foo::*)() const &>(nullptr))));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
&Foo::g3, &Foo::h3, static_cast<int (Foo::*)() &&>(nullptr))));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
&Foo::g4, &Foo::h4, static_cast<int (Foo::*)() const &&>(nullptr))));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
std::make_tuple(static_cast<void (Foo::*)()>(&Foo::vfa),
static_cast<void (Foo::*)()>(&Foo::vfb),
static_cast<void (Foo::*)()>(nullptr))));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
std::make_tuple(static_cast<void (Foo::*)()>(Foo::A::pq()),
static_cast<void (Foo::*)()>(Foo::B::pq()),
static_cast<void (Foo::*)()>(nullptr))));
}
TEST(HashValueTest, PairAndTuple) {
EXPECT_TRUE((is_hashable<std::pair<int, int>>::value));
EXPECT_TRUE((is_hashable<std::pair<const int&, const int&>>::value));
EXPECT_TRUE((is_hashable<std::tuple<int&, int&>>::value));
EXPECT_TRUE((is_hashable<std::tuple<int&&, int&&>>::value));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
std::make_pair(0, 42), std::make_pair(0, 42), std::make_pair(42, 0),
std::make_pair(0, 0), std::make_pair(42, 42), std::make_pair(1, 42))));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
std::make_tuple(std::make_tuple(0, 0, 0), std::make_tuple(0, 0, 42),
std::make_tuple(0, 23, 0), std::make_tuple(17, 0, 0),
std::make_tuple(42, 0, 0), std::make_tuple(3, 9, 9),
std::make_tuple(0, 0, -42))));
int a = 0, b = 1, c = 17, d = 23;
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
std::tie(a, a), std::tie(a, b), std::tie(b, c), std::tie(c, d))));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
std::forward_as_tuple(0, 0, 0), std::forward_as_tuple(0, 0, 42),
std::forward_as_tuple(0, 23, 0), std::forward_as_tuple(17, 0, 0),
std::forward_as_tuple(42, 0, 0), std::forward_as_tuple(3, 9, 9),
std::forward_as_tuple(0, 0, -42))));
}
TEST(HashValueTest, CombineContiguousWorks) {
std::vector<std::tuple<int>> v1 = {std::make_tuple(1), std::make_tuple(3)};
std::vector<std::tuple<int>> v2 = {std::make_tuple(1), std::make_tuple(2)};
auto vh1 = SpyHash(v1);
auto vh2 = SpyHash(v2);
EXPECT_NE(vh1, vh2);
}
struct DummyDeleter {
template <typename T>
void operator() (T* ptr) {}
};
struct SmartPointerEq {
template <typename T, typename U>
bool operator()(const T& t, const U& u) const {
return GetPtr(t) == GetPtr(u);
}
template <typename T>
static auto GetPtr(const T& t) -> decltype(&*t) {
return t ? &*t : nullptr;
}
static std::nullptr_t GetPtr(std::nullptr_t) { return nullptr; }
};
TEST(HashValueTest, SmartPointers) {
EXPECT_TRUE((is_hashable<std::unique_ptr<int>>::value));
EXPECT_TRUE((is_hashable<std::unique_ptr<int, DummyDeleter>>::value));
EXPECT_TRUE((is_hashable<std::shared_ptr<int>>::value));
int i, j;
std::unique_ptr<int, DummyDeleter> unique1(&i);
std::unique_ptr<int, DummyDeleter> unique2(&i);
std::unique_ptr<int, DummyDeleter> unique_other(&j);
std::unique_ptr<int, DummyDeleter> unique_null;
std::shared_ptr<int> shared1(&i, DummyDeleter());
std::shared_ptr<int> shared2(&i, DummyDeleter());
std::shared_ptr<int> shared_other(&j, DummyDeleter());
std::shared_ptr<int> shared_null;
ASSERT_TRUE(SmartPointerEq{}(unique1, shared1));
ASSERT_FALSE(SmartPointerEq{}(unique1, shared_other));
ASSERT_TRUE(SmartPointerEq{}(unique_null, nullptr));
ASSERT_FALSE(SmartPointerEq{}(shared2, nullptr));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
std::forward_as_tuple(&i, nullptr,
unique1, unique2, unique_null,
absl::make_unique<int>(),
shared1, shared2, shared_null,
std::make_shared<int>()),
SmartPointerEq{}));
}
TEST(HashValueTest, FunctionPointer) {
using Func = int (*)();
EXPECT_TRUE(is_hashable<Func>::value);
Func p1 = [] { return 2; }, p2 = [] { return 1; };
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
std::make_tuple(p1, p2, nullptr)));
}
struct WrapInTuple {
template <typename T>
std::tuple<int, T, size_t> operator()(const T& t) const {
return std::make_tuple(7, t, 0xdeadbeef);
}
};
absl::Cord FlatCord(absl::string_view sv) {
absl::Cord c(sv);
c.Flatten();
return c;
}
absl::Cord FragmentedCord(absl::string_view sv) {
if (sv.size() < 2) {
return absl::Cord(sv);
}
size_t halfway = sv.size() / 2;
std::vector<absl::string_view> parts = {sv.substr(0, halfway),
sv.substr(halfway)};
return absl::MakeFragmentedCord(parts);
}
TEST(HashValueTest, Strings) {
EXPECT_TRUE((is_hashable<std::string>::value));
const std::string small = "foo";
const std::string dup = "foofoo";
const std::string large = std::string(2048, 'x');
const std::string huge = std::string(5000, 'a');
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
std::string(), absl::string_view(), absl::Cord(),
std::string(""), absl::string_view(""), absl::Cord(""),
std::string(small), absl::string_view(small), absl::Cord(small),
std::string(dup), absl::string_view(dup), absl::Cord(dup),
std::string(large), absl::string_view(large), absl::Cord(large),
std::string(huge), absl::string_view(huge), FlatCord(huge),
FragmentedCord(huge))));
const WrapInTuple t{};
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
t(std::string()), t(absl::string_view()), t(absl::Cord()),
t(std::string("")), t(absl::string_view("")), t(absl::Cord("")),
t(std::string(small)), t(absl::string_view(small)),
t(absl::Cord(small)),
t(std::string(dup)), t(absl::string_view(dup)), t(absl::Cord(dup)),
t(std::string(large)), t(absl::string_view(large)),
t(absl::Cord(large)),
t(std::string(huge)), t(absl::string_view(huge)),
t(FlatCord(huge)), t(FragmentedCord(huge)))));
EXPECT_NE(SpyHash(static_cast<const char*>("ABC")),
SpyHash(absl::string_view("ABC")));
}
TEST(HashValueTest, WString) {
EXPECT_TRUE((is_hashable<std::wstring>::value));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
std::wstring(), std::wstring(L"ABC"), std::wstring(L"ABC"),
std::wstring(L"Some other different string"),
std::wstring(L"Iñtërnâtiônàlizætiøn"))));
}
TEST(HashValueTest, U16String) {
EXPECT_TRUE((is_hashable<std::u16string>::value));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
std::u16string(), std::u16string(u"ABC"), std::u16string(u"ABC"),
std::u16string(u"Some other different string"),
std::u16string(u"Iñtërnâtiônàlizætiøn"))));
}
TEST(HashValueTest, U32String) {
EXPECT_TRUE((is_hashable<std::u32string>::value));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
std::u32string(), std::u32string(U"ABC"), std::u32string(U"ABC"),
std::u32string(U"Some other different string"),
std::u32string(U"Iñtërnâtiônàlizætiøn"))));
}
TEST(HashValueTest, WStringView) {
#ifndef ABSL_HAVE_STD_STRING_VIEW
GTEST_SKIP();
#else
EXPECT_TRUE((is_hashable<std::wstring_view>::value));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
std::wstring_view(), std::wstring_view(L"ABC"), std::wstring_view(L"ABC"),
std::wstring_view(L"Some other different string_view"),
std::wstring_view(L"Iñtërnâtiônàlizætiøn"))));
#endif
}
TEST(HashValueTest, U16StringView) {
#ifndef ABSL_HAVE_STD_STRING_VIEW
GTEST_SKIP();
#else
EXPECT_TRUE((is_hashable<std::u16string_view>::value));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
std::make_tuple(std::u16string_view(), std::u16string_view(u"ABC"),
std::u16string_view(u"ABC"),
std::u16string_view(u"Some other different string_view"),
std::u16string_view(u"Iñtërnâtiônàlizætiøn"))));
#endif
}
TEST(HashValueTest, U32StringView) {
#ifndef ABSL_HAVE_STD_STRING_VIEW
GTEST_SKIP();
#else
EXPECT_TRUE((is_hashable<std::u32string_view>::value));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
std::make_tuple(std::u32string_view(), std::u32string_view(U"ABC"),
std::u32string_view(U"ABC"),
std::u32string_view(U"Some other different string_view"),
std::u32string_view(U"Iñtërnâtiônàlizætiøn"))));
#endif
}
TEST(HashValueTest, StdFilesystemPath) {
#ifndef ABSL_INTERNAL_STD_FILESYSTEM_PATH_HASH_AVAILABLE
GTEST_SKIP() << "std::filesystem::path is unavailable on this platform";
#else
EXPECT_TRUE((is_hashable<std::filesystem::path>::value));
const auto kTestCases = std::make_tuple(
std::filesystem::path(),
std::filesystem::path("/"),
#ifndef __GLIBCXX__
std::filesystem::path("
#endif
std::filesystem::path("/a/b"),
std::filesystem::path("/a
std::filesystem::path("a/b"),
std::filesystem::path("a/b/"),
std::filesystem::path("a
std::filesystem::path("a
std::filesystem::path("c:/"),
std::filesystem::path("c:\\"),
std::filesystem::path("c:\\/"),
std::filesystem::path("c:\\
std::filesystem::path("c:
std::filesystem::path("c:
std::filesystem::path("/e/p"),
std::filesystem::path("/s/../e/p"),
std::filesystem::path("e/p"),
std::filesystem::path("s/../e/p"));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(kTestCases));
#endif
}
TEST(HashValueTest, StdArray) {
EXPECT_TRUE((is_hashable<std::array<int, 3>>::value));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
std::make_tuple(std::array<int, 3>{}, std::array<int, 3>{{0, 23, 42}})));
}
TEST(HashValueTest, StdBitset) {
EXPECT_TRUE((is_hashable<std::bitset<257>>::value));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
{std::bitset<2>("00"), std::bitset<2>("01"), std::bitset<2>("10"),
std::bitset<2>("11")}));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
{std::bitset<5>("10101"), std::bitset<5>("10001"), std::bitset<5>()}));
constexpr int kNumBits = 256;
std::array<std::string, 6> bit_strings;
bit_strings.fill(std::string(kNumBits, '1'));
bit_strings[1][0] = '0';
bit_strings[2][1] = '0';
bit_strings[3][kNumBits / 3] = '0';
bit_strings[4][kNumBits - 2] = '0';
bit_strings[5][kNumBits - 1] = '0';
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
{std::bitset<kNumBits>(bit_strings[0].c_str()),
std::bitset<kNumBits>(bit_strings[1].c_str()),
std::bitset<kNumBits>(bit_strings[2].c_str()),
std::bitset<kNumBits>(bit_strings[3].c_str()),
std::bitset<kNumBits>(bit_strings[4].c_str()),
std::bitset<kNumBits>(bit_strings[5].c_str())}));
}
struct Private {
int i;
template <typename H>
friend H AbslHashValue(H h, Private p) {
return H::combine(std::move(h), std::abs(p.i));
}
friend bool operator==(Private a, Private b) {
return std::abs(a.i) == std::abs(b.i);
}
friend std::ostream& operator<<(std::ostream& o, Private p) {
return o << p.i;
}
};
class PiecewiseHashTester {
public:
explicit PiecewiseHashTester(absl::string_view buf)
: buf_(buf), piecewise_(false), split_locations_() {}
PiecewiseHashTester(absl::string_view buf, std::set<size_t> split_locations)
: buf_(buf),
piecewise_(true),
split_locations_(std::move(split_locations)) {}
template <typename H>
friend H AbslHashValue(H h, const PiecewiseHashTester& p) {
if (!p.piecewise_) {
return H::combine_contiguous(std::move(h), p.buf_.data(), p.buf_.size());
}
absl::hash_internal::PiecewiseCombiner combiner;
if (p.split_locations_.empty()) {
h = combiner.add_buffer(std::move(h), p.buf_.data(), p.buf_.size());
return combiner.finalize(std::move(h));
}
size_t begin = 0;
for (size_t next : p.split_locations_) {
absl::string_view chunk = p.buf_.substr(begin, next - begin);
h = combiner.add_buffer(std::move(h), chunk.data(), chunk.size());
begin = next;
}
absl::string_view last_chunk = p.buf_.substr(begin);
if (!last_chunk.empty()) {
h = combiner.add_buffer(std::move(h), last_chunk.data(),
last_chunk.size());
}
return combiner.finalize(std::move(h));
}
private:
absl::string_view buf_;
bool piecewise_;
std::set<size_t> split_locations_;
};
struct DummyFooBar {
template <typename H>
friend H AbslHashValue(H h, const DummyFooBar&) {
const char* foo = "foo";
const char* bar = "bar";
h = H::combine_contiguous(std::move(h), foo, 3);
h = H::combine_contiguous(std::move(h), bar, 3);
return h;
}
};
TEST(HashValueTest, CombinePiecewiseBuffer) {
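  // A piecewise hash must not depend on how the buffer is chunked: the
  // empty, "foobar", and big-buffer cases below compare every subset of the
  // candidate break locations against the hash of the contiguous input.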
absl::Hash<PiecewiseHashTester> hash;
EXPECT_EQ(hash(PiecewiseHashTester("")), hash(PiecewiseHashTester("", {})));
EXPECT_EQ(hash(PiecewiseHashTester("foobar")),
hash(PiecewiseHashTester("foobar", {})));
EXPECT_EQ(hash(PiecewiseHashTester("foobar")),
hash(PiecewiseHashTester("foobar", {3})));
EXPECT_NE(hash(PiecewiseHashTester("foobar", {3})),
absl::Hash<DummyFooBar>()(DummyFooBar{}));
for (size_t big_buffer_size : {1024u * 2 + 512u, 1024u * 3}) {
SCOPED_TRACE(big_buffer_size);
std::string big_buffer;
for (size_t i = 0; i < big_buffer_size; ++i) {
big_buffer.push_back(32 + (i * (i / 3)) % 64);
}
auto big_buffer_hash = hash(PiecewiseHashTester(big_buffer));
const int possible_breaks = 9;
size_t breaks[possible_breaks] = {1, 512, 1023, 1024, 1025,
1536, 2047, 2048, 2049};
for (unsigned test_mask = 0; test_mask < (1u << possible_breaks);
++test_mask) {
SCOPED_TRACE(test_mask);
std::set<size_t> break_locations;
for (int j = 0; j < possible_breaks; ++j) {
if (test_mask & (1u << j)) {
break_locations.insert(breaks[j]);
}
}
EXPECT_EQ(
hash(PiecewiseHashTester(big_buffer, std::move(break_locations))),
big_buffer_hash);
}
}
}
TEST(HashValueTest, PrivateSanity) {
EXPECT_TRUE(is_hashable<Private>::value);
EXPECT_NE(SpyHash(Private{0}), SpyHash(Private{1}));
EXPECT_EQ(SpyHash(Private{1}), SpyHash(Private{1}));
}
TEST(HashValueTest, Optional) {
EXPECT_TRUE(is_hashable<absl::optional<Private>>::value);
using O = absl::optional<Private>;
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
std::make_tuple(O{}, O{{1}}, O{{-1}}, O{{10}})));
}
TEST(HashValueTest, Variant) {
using V = absl::variant<Private, std::string>;
EXPECT_TRUE(is_hashable<V>::value);
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
V(Private{1}), V(Private{-1}), V(Private{2}), V("ABC"), V("BCD"))));
#if ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_
struct S {};
EXPECT_FALSE(is_hashable<absl::variant<S>>::value);
#endif
}
TEST(HashValueTest, ReferenceWrapper) {
EXPECT_TRUE(is_hashable<std::reference_wrapper<Private>>::value);
Private p1{1}, p10{10};
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
p1, p10, std::ref(p1), std::ref(p10), std::cref(p1), std::cref(p10))));
EXPECT_TRUE(is_hashable<std::reference_wrapper<int>>::value);
int one = 1, ten = 10;
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
one, ten, std::ref(one), std::ref(ten), std::cref(one), std::cref(ten))));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
std::make_tuple(std::tuple<std::reference_wrapper<int>>(std::ref(one)),
std::tuple<std::reference_wrapper<int>>(std::ref(ten)),
std::tuple<int>(one), std::tuple<int>(ten))));
}
template <typename T, typename = void>
struct IsHashCallable : std::false_type {};
template <typename T>
struct IsHashCallable<T, absl::void_t<decltype(std::declval<absl::Hash<T>>()(
std::declval<const T&>()))>> : std::true_type {};
template <typename T, typename = void>
struct IsAggregateInitializable : std::false_type {};
template <typename T>
struct IsAggregateInitializable<T, absl::void_t<decltype(T{})>>
: std::true_type {};
TEST(IsHashableTest, ValidHash) {
EXPECT_TRUE((is_hashable<int>::value));
EXPECT_TRUE(std::is_default_constructible<absl::Hash<int>>::value);
EXPECT_TRUE(std::is_copy_constructible<absl::Hash<int>>::value);
EXPECT_TRUE(std::is_move_constructible<absl::Hash<int>>::value);
EXPECT_TRUE(absl::is_copy_assignable<absl::Hash<int>>::value);
EXPECT_TRUE(absl::is_move_assignable<absl::Hash<int>>::value);
EXPECT_TRUE(IsHashCallable<int>::value);
EXPECT_TRUE(IsAggregateInitializable<absl::Hash<int>>::value);
}
#if ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_
TEST(IsHashableTest, PoisonHash) {
struct X {};
EXPECT_FALSE((is_hashable<X>::value));
EXPECT_FALSE(std::is_default_constructible<absl::Hash<X>>::value);
EXPECT_FALSE(std::is_copy_constructible<absl::Hash<X>>::value);
EXPECT_FALSE(std::is_move_constructible<absl::Hash<X>>::value);
EXPECT_FALSE(absl::is_copy_assignable<absl::Hash<X>>::value);
EXPECT_FALSE(absl::is_move_assignable<absl::Hash<X>>::value);
EXPECT_FALSE(IsHashCallable<X>::value);
#if !defined(__GNUC__) || defined(__clang__)
EXPECT_FALSE(IsAggregateInitializable<absl::Hash<X>>::value);
#endif
}
#endif
struct NoOp {
template <typename HashCode>
friend HashCode AbslHashValue(HashCode h, NoOp n) {
return h;
}
};
struct EmptyCombine {
template <typename HashCode>
friend HashCode AbslHashValue(HashCode h, EmptyCombine e) {
return HashCode::combine(std::move(h));
}
};
template <typename Int>
struct CombineIterative {
template <typename HashCode>
friend HashCode AbslHashValue(HashCode h, CombineIterative c) {
for (int i = 0; i < 5; ++i) {
h = HashCode::combine(std::move(h), Int(i));
}
return h;
}
};
template <typename Int>
struct CombineVariadic {
template <typename HashCode>
friend HashCode AbslHashValue(HashCode h, CombineVariadic c) {
return HashCode::combine(std::move(h), Int(0), Int(1), Int(2), Int(3),
Int(4));
}
};
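// Tags for the hashing mechanisms absl::Hash can dispatch to, ordered by
// dispatch priority. CustomHashType below opts into arbitrary subsets of
// them so the tests can verify that the highest-priority available
// mechanism is the one chosen.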
enum class InvokeTag {
kUniquelyRepresented,
kHashValue,
#if ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
kLegacyHash,
#endif
kStdHash,
kNone
};
template <InvokeTag T>
using InvokeTagConstant = std::integral_constant<InvokeTag, T>;
template <InvokeTag... Tags>
struct MinTag;
template <InvokeTag a, InvokeTag b, InvokeTag... Tags>
struct MinTag<a, b, Tags...> : MinTag<(a < b ? a : b), Tags...> {};
template <InvokeTag a>
struct MinTag<a> : InvokeTagConstant<a> {};
template <InvokeTag... Tags>
struct CustomHashType {
explicit CustomHashType(size_t val) : value(val) {}
size_t value;
};
template <InvokeTag allowed, InvokeTag... tags>
struct EnableIfContained
: std::enable_if<absl::disjunction<
std::integral_constant<bool, allowed == tags>...>::value> {};
template <
typename H, InvokeTag... Tags,
typename = typename EnableIfContained<InvokeTag::kHashValue, Tags...>::type>
H AbslHashValue(H state, CustomHashType<Tags...> t) {
static_assert(MinTag<Tags...>::value == InvokeTag::kHashValue, "");
return H::combine(std::move(state),
t.value + static_cast<int>(InvokeTag::kHashValue));
}
}
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace hash_internal {
template <InvokeTag... Tags>
struct is_uniquely_represented<
CustomHashType<Tags...>,
typename EnableIfContained<InvokeTag::kUniquelyRepresented, Tags...>::type>
: std::true_type {};
}
ABSL_NAMESPACE_END
}
#if ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
namespace ABSL_INTERNAL_LEGACY_HASH_NAMESPACE {
template <InvokeTag... Tags>
struct hash<CustomHashType<Tags...>> {
template <InvokeTag... TagsIn, typename = typename EnableIfContained<
InvokeTag::kLegacyHash, TagsIn...>::type>
size_t operator()(CustomHashType<TagsIn...> t) const {
static_assert(MinTag<Tags...>::value == InvokeTag::kLegacyHash, "");
return t.value + static_cast<int>(InvokeTag::kLegacyHash);
}
};
}
#endif
namespace std {
template <InvokeTag... Tags>
struct hash<CustomHashType<Tags...>> {
template <InvokeTag... TagsIn, typename = typename EnableIfContained<
InvokeTag::kStdHash, TagsIn...>::type>
size_t operator()(CustomHashType<TagsIn...> t) const {
static_assert(MinTag<Tags...>::value == InvokeTag::kStdHash, "");
return t.value + static_cast<int>(InvokeTag::kStdHash);
}
};
}
namespace {
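// Recursively enumerates every subset of InvokeTags and checks that hashing
// a CustomHashType dispatches to the lowest-valued (highest-priority) tag
// present in the subset.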
template <typename... T>
void TestCustomHashType(InvokeTagConstant<InvokeTag::kNone>, T...) {
using type = CustomHashType<T::value...>;
SCOPED_TRACE(testing::PrintToString(std::vector<InvokeTag>{T::value...}));
EXPECT_TRUE(is_hashable<type>());
EXPECT_TRUE(is_hashable<const type>());
EXPECT_TRUE(is_hashable<const type&>());
const size_t offset = static_cast<int>(std::min({T::value...}));
EXPECT_EQ(SpyHash(type(7)), SpyHash(size_t{7 + offset}));
}
void TestCustomHashType(InvokeTagConstant<InvokeTag::kNone>) {
#if ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_
using type = CustomHashType<>;
EXPECT_FALSE(is_hashable<type>());
EXPECT_FALSE(is_hashable<const type>());
EXPECT_FALSE(is_hashable<const type&>());
#endif
}
template <InvokeTag Tag, typename... T>
void TestCustomHashType(InvokeTagConstant<Tag> tag, T... t) {
constexpr auto next = static_cast<InvokeTag>(static_cast<int>(Tag) + 1);
TestCustomHashType(InvokeTagConstant<next>(), tag, t...);
TestCustomHashType(InvokeTagConstant<next>(), t...);
}
TEST(HashTest, CustomHashType) {
TestCustomHashType(InvokeTagConstant<InvokeTag{}>());
}
TEST(HashTest, NoOpsAreEquivalent) {
EXPECT_EQ(Hash<NoOp>()({}), Hash<NoOp>()({}));
EXPECT_EQ(Hash<NoOp>()({}), Hash<EmptyCombine>()({}));
}
template <typename T>
class HashIntTest : public testing::Test {
};
TYPED_TEST_SUITE_P(HashIntTest);
TYPED_TEST_P(HashIntTest, BasicUsage) {
EXPECT_NE(Hash<NoOp>()({}), Hash<TypeParam>()(0));
EXPECT_NE(Hash<NoOp>()({}),
Hash<TypeParam>()(std::numeric_limits<TypeParam>::max()));
if (std::numeric_limits<TypeParam>::min() != 0) {
EXPECT_NE(Hash<NoOp>()({}),
Hash<TypeParam>()(std::numeric_limits<TypeParam>::min()));
}
EXPECT_EQ(Hash<CombineIterative<TypeParam>>()({}),
Hash<CombineVariadic<TypeParam>>()({}));
}
REGISTER_TYPED_TEST_SUITE_P(HashIntTest, BasicUsage);
using IntTypes = testing::Types<unsigned char, char, int, int32_t, int64_t,
uint32_t, uint64_t, size_t>;
INSTANTIATE_TYPED_TEST_SUITE_P(My, HashIntTest, IntTypes);
struct StructWithPadding {
char c;
int i;
template <typename H>
friend H AbslHashValue(H hash_state, const StructWithPadding& s) {
return H::combine(std::move(hash_state), s.c, s.i);
}
};
static_assert(sizeof(StructWithPadding) > sizeof(char) + sizeof(int),
"StructWithPadding doesn't have padding");
static_assert(std::is_standard_layout<StructWithPadding>::value, "");
template <typename T>
struct ArraySlice {
T* begin;
T* end;
template <typename H>
friend H AbslHashValue(H hash_state, const ArraySlice& slice) {
for (auto t = slice.begin; t != slice.end; ++t) {
hash_state = H::combine(std::move(hash_state), *t);
}
return hash_state;
}
};
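// Padding bytes must not feed into the hash: two structs with equal fields
// but deliberately different padding contents have to hash identically.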
TEST(HashTest, HashNonUniquelyRepresentedType) {
static const size_t kNumStructs = 10;
unsigned char buffer1[kNumStructs * sizeof(StructWithPadding)];
std::memset(buffer1, 0, sizeof(buffer1));
auto* s1 = reinterpret_cast<StructWithPadding*>(buffer1);
unsigned char buffer2[kNumStructs * sizeof(StructWithPadding)];
std::memset(buffer2, 255, sizeof(buffer2));
auto* s2 = reinterpret_cast<StructWithPadding*>(buffer2);
for (size_t i = 0; i < kNumStructs; ++i) {
SCOPED_TRACE(i);
s1[i].c = s2[i].c = static_cast<char>('0' + i);
s1[i].i = s2[i].i = static_cast<int>(i);
ASSERT_FALSE(memcmp(buffer1 + i * sizeof(StructWithPadding),
buffer2 + i * sizeof(StructWithPadding),
sizeof(StructWithPadding)) == 0)
<< "Bug in test code: objects do not have unequal"
<< " object representations";
}
EXPECT_EQ(Hash<StructWithPadding>()(s1[0]), Hash<StructWithPadding>()(s2[0]));
EXPECT_EQ(Hash<ArraySlice<StructWithPadding>>()({s1, s1 + kNumStructs}),
Hash<ArraySlice<StructWithPadding>>()({s2, s2 + kNumStructs}));
}
TEST(HashTest, StandardHashContainerUsage) {
std::unordered_map<int, std::string, Hash<int>> map = {{0, "foo"},
{42, "bar"}};
EXPECT_NE(map.find(0), map.end());
EXPECT_EQ(map.find(1), map.end());
EXPECT_NE(map.find(0u), map.end());
}
struct ConvertibleFromNoOp {
ConvertibleFromNoOp(NoOp) {}
template <typename H>
friend H AbslHashValue(H hash_state, ConvertibleFromNoOp) {
return H::combine(std::move(hash_state), 1);
}
};
TEST(HashTest, HeterogeneousCall) {
EXPECT_NE(Hash<ConvertibleFromNoOp>()(NoOp()),
Hash<NoOp>()(NoOp()));
}
TEST(IsUniquelyRepresentedTest, SanityTest) {
using absl::hash_internal::is_uniquely_represented;
EXPECT_TRUE(is_uniquely_represented<unsigned char>::value);
EXPECT_TRUE(is_uniquely_represented<int>::value);
EXPECT_FALSE(is_uniquely_represented<bool>::value);
EXPECT_FALSE(is_uniquely_represented<int*>::value);
}
struct IntAndString {
int i;
std::string s;
template <typename H>
friend H AbslHashValue(H hash_state, IntAndString int_and_string) {
return H::combine(std::move(hash_state), int_and_string.s,
int_and_string.i);
}
};
TEST(HashTest, SmallValueOn64ByteBoundary) {
Hash<IntAndString>()(IntAndString{0, std::string(63, '0')});
}
TEST(HashTest, TypeErased) {
EXPECT_TRUE((is_hashable<TypeErasedValue<size_t>>::value));
EXPECT_TRUE((is_hashable<std::pair<TypeErasedValue<size_t>, int>>::value));
EXPECT_EQ(SpyHash(TypeErasedValue<size_t>(7)), SpyHash(size_t{7}));
EXPECT_NE(SpyHash(TypeErasedValue<size_t>(7)), SpyHash(size_t{13}));
EXPECT_EQ(SpyHash(std::make_pair(TypeErasedValue<size_t>(7), 17)),
SpyHash(std::make_pair(size_t{7}, 17)));
absl::flat_hash_set<absl::flat_hash_set<int>> ss = {{1, 2}, {3, 4}};
TypeErasedContainer<absl::flat_hash_set<absl::flat_hash_set<int>>> es = {
absl::flat_hash_set<int>{1, 2}, {3, 4}};
absl::flat_hash_set<TypeErasedContainer<absl::flat_hash_set<int>>> se = {
{1, 2}, {3, 4}};
EXPECT_EQ(SpyHash(ss), SpyHash(es));
EXPECT_EQ(SpyHash(ss), SpyHash(se));
}
struct ValueWithBoolConversion {
operator bool() const { return false; }
int i;
};
}
namespace std {
template <>
struct hash<ValueWithBoolConversion> {
size_t operator()(ValueWithBoolConversion v) {
return static_cast<size_t>(v.i);
}
};
}
namespace {
TEST(HashTest, DoesNotUseImplicitConversionsToBool) {
EXPECT_NE(absl::Hash<ValueWithBoolConversion>()(ValueWithBoolConversion{0}),
absl::Hash<ValueWithBoolConversion>()(ValueWithBoolConversion{1}));
}
TEST(HashOf, MatchesHashForSingleArgument) {
std::string s = "forty two";
double d = 42.0;
std::tuple<int, int> t{4, 2};
int i = 42;
int neg_i = -42;
int16_t i16 = 42;
int16_t neg_i16 = -42;
int8_t i8 = 42;
int8_t neg_i8 = -42;
EXPECT_EQ(absl::HashOf(s), absl::Hash<std::string>{}(s));
EXPECT_EQ(absl::HashOf(d), absl::Hash<double>{}(d));
EXPECT_EQ(absl::HashOf(t), (absl::Hash<std::tuple<int, int>>{}(t)));
EXPECT_EQ(absl::HashOf(i), absl::Hash<int>{}(i));
EXPECT_EQ(absl::HashOf(neg_i), absl::Hash<int>{}(neg_i));
EXPECT_EQ(absl::HashOf(i16), absl::Hash<int16_t>{}(i16));
EXPECT_EQ(absl::HashOf(neg_i16), absl::Hash<int16_t>{}(neg_i16));
EXPECT_EQ(absl::HashOf(i8), absl::Hash<int8_t>{}(i8));
EXPECT_EQ(absl::HashOf(neg_i8), absl::Hash<int8_t>{}(neg_i8));
}
TEST(HashOf, MatchesHashOfTupleForMultipleArguments) {
std::string hello = "hello";
std::string world = "world";
EXPECT_EQ(absl::HashOf(), absl::HashOf(std::make_tuple()));
EXPECT_EQ(absl::HashOf(hello), absl::HashOf(std::make_tuple(hello)));
EXPECT_EQ(absl::HashOf(hello, world),
absl::HashOf(std::make_tuple(hello, world)));
}
template <typename T>
std::true_type HashOfExplicitParameter(decltype(absl::HashOf<T>(0))) {
return {};
}
template <typename T>
std::false_type HashOfExplicitParameter(size_t) {
return {};
}
TEST(HashOf, CantPassExplicitTemplateParameters) {
EXPECT_FALSE(HashOfExplicitParameter<int>(0));
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/hash/internal/hash.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/hash/hash_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
da860396-918a-45e7-8b9f-5a4861646232 | cpp | google/cel-cpp | value_export_util | eval/public/value_export_util.cc | eval/public/value_export_util_test.cc | #include "eval/public/value_export_util.h"
#include <string>
#include "google/protobuf/util/json_util.h"
#include "google/protobuf/util/time_util.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "internal/proto_time_encoding.h"
namespace google::api::expr::runtime {
using google::protobuf::Duration;
using google::protobuf::Timestamp;
using google::protobuf::Value;
using google::protobuf::util::TimeUtil;
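// Renders a CEL map key (int64, uint64, or string) as a string so it can be
// used as a field name in a google.protobuf.Struct.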
absl::Status KeyAsString(const CelValue& value, std::string* key) {
switch (value.type()) {
case CelValue::Type::kInt64: {
*key = absl::StrCat(value.Int64OrDie());
break;
}
case CelValue::Type::kUint64: {
*key = absl::StrCat(value.Uint64OrDie());
break;
}
case CelValue::Type::kString: {
key->assign(value.StringOrDie().value().data(),
value.StringOrDie().value().size());
break;
}
default: {
return absl::InvalidArgumentError("Unsupported map type");
}
}
return absl::OkStatus();
}
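// Recursively converts a CelValue into a google.protobuf.Value: integers
// are widened to double, bytes are base64-escaped, durations and timestamps
// are formatted as strings, and messages round-trip through their JSON form.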
absl::Status ExportAsProtoValue(const CelValue& in_value, Value* out_value,
google::protobuf::Arena* arena) {
if (in_value.IsNull()) {
out_value->set_null_value(google::protobuf::NULL_VALUE);
return absl::OkStatus();
}
switch (in_value.type()) {
case CelValue::Type::kBool: {
out_value->set_bool_value(in_value.BoolOrDie());
break;
}
case CelValue::Type::kInt64: {
out_value->set_number_value(static_cast<double>(in_value.Int64OrDie()));
break;
}
case CelValue::Type::kUint64: {
out_value->set_number_value(static_cast<double>(in_value.Uint64OrDie()));
break;
}
case CelValue::Type::kDouble: {
out_value->set_number_value(in_value.DoubleOrDie());
break;
}
case CelValue::Type::kString: {
auto value = in_value.StringOrDie().value();
out_value->set_string_value(value.data(), value.size());
break;
}
case CelValue::Type::kBytes: {
absl::Base64Escape(in_value.BytesOrDie().value(),
out_value->mutable_string_value());
break;
}
case CelValue::Type::kDuration: {
Duration duration;
auto status =
cel::internal::EncodeDuration(in_value.DurationOrDie(), &duration);
if (!status.ok()) {
return status;
}
out_value->set_string_value(TimeUtil::ToString(duration));
break;
}
case CelValue::Type::kTimestamp: {
Timestamp timestamp;
auto status =
cel::internal::EncodeTime(in_value.TimestampOrDie(), &timestamp);
if (!status.ok()) {
return status;
}
out_value->set_string_value(TimeUtil::ToString(timestamp));
break;
}
case CelValue::Type::kMessage: {
google::protobuf::util::JsonPrintOptions json_options;
json_options.preserve_proto_field_names = true;
std::string json;
auto status = google::protobuf::util::MessageToJsonString(*in_value.MessageOrDie(),
&json, json_options);
if (!status.ok()) {
return absl::InternalError(status.ToString());
}
google::protobuf::util::JsonParseOptions json_parse_options;
status = google::protobuf::util::JsonStringToMessage(json, out_value,
json_parse_options);
if (!status.ok()) {
return absl::InternalError(status.ToString());
}
break;
}
case CelValue::Type::kList: {
const CelList* cel_list = in_value.ListOrDie();
auto out_values = out_value->mutable_list_value();
for (int i = 0; i < cel_list->size(); i++) {
auto status = ExportAsProtoValue((*cel_list).Get(arena, i),
out_values->add_values(), arena);
if (!status.ok()) {
return status;
}
}
break;
}
case CelValue::Type::kMap: {
const CelMap* cel_map = in_value.MapOrDie();
CEL_ASSIGN_OR_RETURN(auto keys_list, cel_map->ListKeys(arena));
auto out_values = out_value->mutable_struct_value()->mutable_fields();
for (int i = 0; i < keys_list->size(); i++) {
std::string key;
CelValue map_key = (*keys_list).Get(arena, i);
auto status = KeyAsString(map_key, &key);
if (!status.ok()) {
return status;
}
auto map_value_ref = (*cel_map).Get(arena, map_key);
CelValue map_value =
(map_value_ref) ? map_value_ref.value() : CelValue();
status = ExportAsProtoValue(map_value, &((*out_values)[key]), arena);
if (!status.ok()) {
return status;
}
}
break;
}
default: {
return absl::InvalidArgumentError("Unsupported value type");
}
}
return absl::OkStatus();
}
} | #include "eval/public/value_export_util.h"
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "eval/public/containers/container_backed_list_impl.h"
#include "eval/public/containers/container_backed_map_impl.h"
#include "eval/public/structs/cel_proto_wrapper.h"
#include "eval/testutil/test_message.pb.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
#include "testutil/util.h"
namespace google::api::expr::runtime {
namespace {
using google::protobuf::Duration;
using google::protobuf::ListValue;
using google::protobuf::Struct;
using google::protobuf::Timestamp;
using google::protobuf::Value;
using google::protobuf::Arena;
TEST(ValueExportUtilTest, ConvertBoolValue) {
CelValue cel_value = CelValue::CreateBool(true);
Value value;
EXPECT_OK(ExportAsProtoValue(cel_value, &value));
EXPECT_EQ(value.kind_case(), Value::KindCase::kBoolValue);
EXPECT_EQ(value.bool_value(), true);
}
TEST(ValueExportUtilTest, ConvertInt64Value) {
CelValue cel_value = CelValue::CreateInt64(-1);
Value value;
EXPECT_OK(ExportAsProtoValue(cel_value, &value));
EXPECT_EQ(value.kind_case(), Value::KindCase::kNumberValue);
EXPECT_DOUBLE_EQ(value.number_value(), -1);
}
TEST(ValueExportUtilTest, ConvertUint64Value) {
CelValue cel_value = CelValue::CreateUint64(1);
Value value;
EXPECT_OK(ExportAsProtoValue(cel_value, &value));
EXPECT_EQ(value.kind_case(), Value::KindCase::kNumberValue);
EXPECT_DOUBLE_EQ(value.number_value(), 1);
}
TEST(ValueExportUtilTest, ConvertDoubleValue) {
CelValue cel_value = CelValue::CreateDouble(1.3);
Value value;
EXPECT_OK(ExportAsProtoValue(cel_value, &value));
EXPECT_EQ(value.kind_case(), Value::KindCase::kNumberValue);
EXPECT_DOUBLE_EQ(value.number_value(), 1.3);
}
TEST(ValueExportUtilTest, ConvertStringValue) {
std::string test = "test";
CelValue cel_value = CelValue::CreateString(&test);
Value value;
EXPECT_OK(ExportAsProtoValue(cel_value, &value));
EXPECT_EQ(value.kind_case(), Value::KindCase::kStringValue);
EXPECT_EQ(value.string_value(), "test");
}
TEST(ValueExportUtilTest, ConvertBytesValue) {
std::string test = "test";
CelValue cel_value = CelValue::CreateBytes(&test);
Value value;
EXPECT_OK(ExportAsProtoValue(cel_value, &value));
EXPECT_EQ(value.kind_case(), Value::KindCase::kStringValue);
EXPECT_EQ(value.string_value(), "dGVzdA==");
}
TEST(ValueExportUtilTest, ConvertDurationValue) {
Duration duration;
duration.set_seconds(2);
duration.set_nanos(3);
CelValue cel_value = CelProtoWrapper::CreateDuration(&duration);
Value value;
EXPECT_OK(ExportAsProtoValue(cel_value, &value));
EXPECT_EQ(value.kind_case(), Value::KindCase::kStringValue);
EXPECT_EQ(value.string_value(), "2.000000003s");
}
TEST(ValueExportUtilTest, ConvertTimestampValue) {
Timestamp timestamp;
timestamp.set_seconds(1000000000);
timestamp.set_nanos(3);
CelValue cel_value = CelProtoWrapper::CreateTimestamp(&timestamp);
Value value;
EXPECT_OK(ExportAsProtoValue(cel_value, &value));
EXPECT_EQ(value.kind_case(), Value::KindCase::kStringValue);
EXPECT_EQ(value.string_value(), "2001-09-09T01:46:40.000000003Z");
}
TEST(ValueExportUtilTest, ConvertStructMessage) {
Struct struct_msg;
(*struct_msg.mutable_fields())["string_value"].set_string_value("test");
Arena arena;
CelValue cel_value = CelProtoWrapper::CreateMessage(&struct_msg, &arena);
Value value;
EXPECT_OK(ExportAsProtoValue(cel_value, &value));
EXPECT_EQ(value.kind_case(), Value::KindCase::kStructValue);
EXPECT_THAT(value.struct_value(), testutil::EqualsProto(struct_msg));
}
TEST(ValueExportUtilTest, ConvertValueMessage) {
Value value_in;
(*value_in.mutable_struct_value()->mutable_fields())["boolean_value"]
.set_bool_value(true);
Arena arena;
CelValue cel_value = CelProtoWrapper::CreateMessage(&value_in, &arena);
Value value_out;
EXPECT_OK(ExportAsProtoValue(cel_value, &value_out));
EXPECT_THAT(value_in, testutil::EqualsProto(value_out));
}
TEST(ValueExportUtilTest, ConvertListValueMessage) {
ListValue list_value;
list_value.add_values()->set_string_value("test");
list_value.add_values()->set_bool_value(true);
Arena arena;
CelValue cel_value = CelProtoWrapper::CreateMessage(&list_value, &arena);
Value value_out;
EXPECT_OK(ExportAsProtoValue(cel_value, &value_out));
EXPECT_THAT(list_value, testutil::EqualsProto(value_out.list_value()));
}
TEST(ValueExportUtilTest, ConvertRepeatedBoolValue) {
Arena arena;
Value value;
TestMessage* msg = Arena::Create<TestMessage>(&arena);
msg->add_bool_list(true);
msg->add_bool_list(false);
CelValue cel_value = CelProtoWrapper::CreateMessage(msg, &arena);
EXPECT_OK(ExportAsProtoValue(cel_value, &value));
EXPECT_EQ(value.kind_case(), Value::KindCase::kStructValue);
Value list_value = value.struct_value().fields().at("bool_list");
EXPECT_TRUE(list_value.has_list_value());
EXPECT_EQ(list_value.list_value().values(0).bool_value(), true);
EXPECT_EQ(list_value.list_value().values(1).bool_value(), false);
}
TEST(ValueExportUtilTest, ConvertRepeatedInt32Value) {
Arena arena;
Value value;
TestMessage* msg = Arena::Create<TestMessage>(&arena);
msg->add_int32_list(2);
msg->add_int32_list(3);
CelValue cel_value = CelProtoWrapper::CreateMessage(msg, &arena);
EXPECT_OK(ExportAsProtoValue(cel_value, &value));
EXPECT_EQ(value.kind_case(), Value::KindCase::kStructValue);
Value list_value = value.struct_value().fields().at("int32_list");
EXPECT_TRUE(list_value.has_list_value());
EXPECT_DOUBLE_EQ(list_value.list_value().values(0).number_value(), 2);
EXPECT_DOUBLE_EQ(list_value.list_value().values(1).number_value(), 3);
}
TEST(ValueExportUtilTest, ConvertRepeatedInt64Value) {
Arena arena;
Value value;
TestMessage* msg = Arena::Create<TestMessage>(&arena);
msg->add_int64_list(2);
msg->add_int64_list(3);
CelValue cel_value = CelProtoWrapper::CreateMessage(msg, &arena);
EXPECT_OK(ExportAsProtoValue(cel_value, &value));
EXPECT_EQ(value.kind_case(), Value::KindCase::kStructValue);
Value list_value = value.struct_value().fields().at("int64_list");
EXPECT_TRUE(list_value.has_list_value());
EXPECT_EQ(list_value.list_value().values(0).string_value(), "2");
EXPECT_EQ(list_value.list_value().values(1).string_value(), "3");
}
TEST(ValueExportUtilTest, ConvertRepeatedUint64Value) {
Arena arena;
Value value;
TestMessage* msg = Arena::Create<TestMessage>(&arena);
msg->add_uint64_list(2);
msg->add_uint64_list(3);
CelValue cel_value = CelProtoWrapper::CreateMessage(msg, &arena);
EXPECT_OK(ExportAsProtoValue(cel_value, &value));
EXPECT_EQ(value.kind_case(), Value::KindCase::kStructValue);
Value list_value = value.struct_value().fields().at("uint64_list");
EXPECT_TRUE(list_value.has_list_value());
EXPECT_EQ(list_value.list_value().values(0).string_value(), "2");
EXPECT_EQ(list_value.list_value().values(1).string_value(), "3");
}
TEST(ValueExportUtilTest, ConvertRepeatedDoubleValue) {
Arena arena;
Value value;
TestMessage* msg = Arena::Create<TestMessage>(&arena);
msg->add_double_list(2);
msg->add_double_list(3);
CelValue cel_value = CelProtoWrapper::CreateMessage(msg, &arena);
EXPECT_OK(ExportAsProtoValue(cel_value, &value));
EXPECT_EQ(value.kind_case(), Value::KindCase::kStructValue);
Value list_value = value.struct_value().fields().at("double_list");
EXPECT_TRUE(list_value.has_list_value());
EXPECT_DOUBLE_EQ(list_value.list_value().values(0).number_value(), 2);
EXPECT_DOUBLE_EQ(list_value.list_value().values(1).number_value(), 3);
}
TEST(ValueExportUtilTest, ConvertRepeatedStringValue) {
Arena arena;
Value value;
TestMessage* msg = Arena::Create<TestMessage>(&arena);
msg->add_string_list("test1");
msg->add_string_list("test2");
CelValue cel_value = CelProtoWrapper::CreateMessage(msg, &arena);
EXPECT_OK(ExportAsProtoValue(cel_value, &value));
EXPECT_EQ(value.kind_case(), Value::KindCase::kStructValue);
Value list_value = value.struct_value().fields().at("string_list");
EXPECT_TRUE(list_value.has_list_value());
EXPECT_EQ(list_value.list_value().values(0).string_value(), "test1");
EXPECT_EQ(list_value.list_value().values(1).string_value(), "test2");
}
TEST(ValueExportUtilTest, ConvertRepeatedBytesValue) {
Arena arena;
Value value;
TestMessage* msg = Arena::Create<TestMessage>(&arena);
msg->add_bytes_list("test1");
msg->add_bytes_list("test2");
CelValue cel_value = CelProtoWrapper::CreateMessage(msg, &arena);
EXPECT_OK(ExportAsProtoValue(cel_value, &value));
EXPECT_EQ(value.kind_case(), Value::KindCase::kStructValue);
Value list_value = value.struct_value().fields().at("bytes_list");
EXPECT_TRUE(list_value.has_list_value());
EXPECT_EQ(list_value.list_value().values(0).string_value(), "dGVzdDE=");
EXPECT_EQ(list_value.list_value().values(1).string_value(), "dGVzdDI=");
}
TEST(ValueExportUtilTest, ConvertCelList) {
Arena arena;
Value value;
std::vector<CelValue> values;
values.push_back(CelValue::CreateInt64(2));
values.push_back(CelValue::CreateInt64(3));
CelList* cel_list = Arena::Create<ContainerBackedListImpl>(&arena, values);
CelValue cel_value = CelValue::CreateList(cel_list);
EXPECT_OK(ExportAsProtoValue(cel_value, &value));
EXPECT_EQ(value.kind_case(), Value::KindCase::kListValue);
EXPECT_DOUBLE_EQ(value.list_value().values(0).number_value(), 2);
EXPECT_DOUBLE_EQ(value.list_value().values(1).number_value(), 3);
}
TEST(ValueExportUtilTest, ConvertCelMapWithStringKey) {
Value value;
std::vector<std::pair<CelValue, CelValue>> map_entries;
std::string key1 = "key1";
std::string key2 = "key2";
std::string value1 = "value1";
std::string value2 = "value2";
map_entries.push_back(
{CelValue::CreateString(&key1), CelValue::CreateString(&value1)});
map_entries.push_back(
{CelValue::CreateString(&key2), CelValue::CreateString(&value2)});
auto cel_map = CreateContainerBackedMap(
absl::Span<std::pair<CelValue, CelValue>>(map_entries))
.value();
CelValue cel_value = CelValue::CreateMap(cel_map.get());
EXPECT_OK(ExportAsProtoValue(cel_value, &value));
EXPECT_EQ(value.kind_case(), Value::KindCase::kStructValue);
const auto& fields = value.struct_value().fields();
EXPECT_EQ(fields.at(key1).string_value(), value1);
EXPECT_EQ(fields.at(key2).string_value(), value2);
}
TEST(ValueExportUtilTest, ConvertCelMapWithInt64Key) {
Value value;
std::vector<std::pair<CelValue, CelValue>> map_entries;
int key1 = -1;
int key2 = 2;
std::string value1 = "value1";
std::string value2 = "value2";
map_entries.push_back(
{CelValue::CreateInt64(key1), CelValue::CreateString(&value1)});
map_entries.push_back(
{CelValue::CreateInt64(key2), CelValue::CreateString(&value2)});
auto cel_map = CreateContainerBackedMap(
absl::Span<std::pair<CelValue, CelValue>>(map_entries))
.value();
CelValue cel_value = CelValue::CreateMap(cel_map.get());
EXPECT_OK(ExportAsProtoValue(cel_value, &value));
EXPECT_EQ(value.kind_case(), Value::KindCase::kStructValue);
const auto& fields = value.struct_value().fields();
EXPECT_EQ(fields.at(absl::StrCat(key1)).string_value(), value1);
EXPECT_EQ(fields.at(absl::StrCat(key2)).string_value(), value2);
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/value_export_util.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/value_export_util_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
752a36ae-7bac-440c-b8fb-ae3cbe7201fb | cpp | tensorflow/tensorflow | periodic_function | tensorflow/core/kernels/batching_util/periodic_function.cc | tensorflow/core/kernels/batching_util/periodic_function_test.cc | #include "tensorflow/core/kernels/batching_util/periodic_function.h"
#include <algorithm>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace serving {
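// The constructor clamps negative intervals to 0 (with a DCHECK and a
// warning) and immediately starts the background thread that drives
// RunLoop().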
PeriodicFunction::PeriodicFunction(absl::AnyInvocable<void()> function,
const int64_t interval_micros,
const Options& options)
: function_(std::move(function)),
interval_micros_([interval_micros]() -> int64_t {
if (interval_micros < 0) {
const string error = strings::StrCat(
" The value of 'interval_micros' should be >= 0: ",
interval_micros, ". ");
DCHECK(false) << error;
LOG(WARNING) << error << "Resetting it to 0.";
return 0;
}
return interval_micros;
}()),
options_(options) {
thread_.reset(options_.env->StartThread(
options_.thread_options, options_.thread_name_prefix, [this]() {
RunLoop(options_.env->NowMicros());
}));
}
PeriodicFunction::~PeriodicFunction() {
NotifyStop();
thread_.reset();
}
void PeriodicFunction::NotifyStop() {
if (!stop_thread_.HasBeenNotified()) {
stop_thread_.Notify();
}
}
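// Invokes function_ until NotifyStop() is called. Sleep time is computed so
// that consecutive invocations *start* interval_micros_ apart; if a call
// overruns the interval, the next one begins immediately.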
void PeriodicFunction::RunLoop(const int64_t start) {
{
if (options_.startup_delay_micros > 0) {
const int64_t deadline = start + options_.startup_delay_micros;
options_.env->SleepForMicroseconds(deadline - start);
}
while (!stop_thread_.HasBeenNotified()) {
VLOG(3) << "Running function.";
const int64_t begin = options_.env->NowMicros();
function_();
const int64_t end =
std::max(static_cast<int64_t>(options_.env->NowMicros()), begin);
const int64_t deadline = begin + interval_micros_;
if (deadline > end) {
if (end > begin) {
VLOG(3) << "Reducing interval_micros from " << interval_micros_
<< " to " << (deadline - end);
}
options_.env->SleepForMicroseconds(deadline - end);
} else {
VLOG(3) << "Function took longer than interval_micros, so not sleeping";
}
}
}
}
}
} | #include "tensorflow/core/kernels/batching_util/periodic_function.h"
#include <memory>
#include <string>
#include "tensorflow/core/kernels/batching_util/fake_clock_env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace serving {
namespace internal {
class PeriodicFunctionTestAccess {
public:
explicit PeriodicFunctionTestAccess(PeriodicFunction* periodic_function)
: periodic_function_(periodic_function) {}
void NotifyStop() { periodic_function_->NotifyStop(); }
private:
PeriodicFunction* const periodic_function_;
};
}
namespace {
using test_util::FakeClockEnv;
void StopPeriodicFunction(PeriodicFunction* periodic_function,
FakeClockEnv* fake_clock_env,
const uint64 pf_interval_micros) {
fake_clock_env->BlockUntilThreadsAsleep(1);
internal::PeriodicFunctionTestAccess(periodic_function).NotifyStop();
fake_clock_env->AdvanceByMicroseconds(pf_interval_micros);
}
TEST(PeriodicFunctionTest, ObeyInterval) {
const int64_t kPeriodMicros = 2;
const int kCalls = 10;
int actual_calls = 0;
{
FakeClockEnv fake_clock_env(Env::Default());
PeriodicFunction::Options options;
options.env = &fake_clock_env;
PeriodicFunction periodic_function([&actual_calls]() { ++actual_calls; },
kPeriodMicros, options);
for (int i = 0; i < kCalls; ++i) {
fake_clock_env.BlockUntilThreadsAsleep(1);
fake_clock_env.AdvanceByMicroseconds(kPeriodMicros);
}
StopPeriodicFunction(&periodic_function, &fake_clock_env, kPeriodMicros);
}
ASSERT_EQ(actual_calls, kCalls + 1);
}
TEST(PeriodicFunctionTest, ObeyStartupDelay) {
const int64_t kDelayMicros = 10;
const int64_t kPeriodMicros = kDelayMicros / 10;
int actual_calls = 0;
{
PeriodicFunction::Options options;
options.startup_delay_micros = kDelayMicros;
FakeClockEnv fake_clock_env(Env::Default());
options.env = &fake_clock_env;
PeriodicFunction periodic_function([&actual_calls]() { ++actual_calls; },
kPeriodMicros, options);
fake_clock_env.BlockUntilThreadsAsleep(1);
EXPECT_EQ(0, actual_calls);
fake_clock_env.AdvanceByMicroseconds(kDelayMicros);
StopPeriodicFunction(&periodic_function, &fake_clock_env, kDelayMicros);
}
EXPECT_EQ(1, actual_calls);
}
TEST(PeriodicFunctionTest, StartupDelayRace) {
const int64_t kDelayMicros = 10;
const int64_t kPeriodMicros = kDelayMicros / 10;
mutex mu;
int counter = 0;
std::unique_ptr<Notification> listener(new Notification);
FakeClockEnv fake_clock_env(Env::Default());
PeriodicFunction::Options options;
options.env = &fake_clock_env;
options.startup_delay_micros = kDelayMicros;
PeriodicFunction periodic_function(
[&mu, &counter, &listener]() {
mutex_lock l(mu);
counter++;
listener->Notify();
},
kPeriodMicros, options);
fake_clock_env.BlockUntilThreadsAsleep(1);
fake_clock_env.AdvanceByMicroseconds(kDelayMicros);
listener->WaitForNotification();
{
mutex_lock l(mu);
EXPECT_EQ(1, counter);
listener.reset(new Notification);
}
fake_clock_env.BlockUntilThreadsAsleep(1);
fake_clock_env.AdvanceByMicroseconds(kPeriodMicros);
listener->WaitForNotification();
{
mutex_lock l(mu);
EXPECT_EQ(2, counter);
}
StopPeriodicFunction(&periodic_function, &fake_clock_env, kPeriodMicros);
}
TEST(PeriodicFunctionTest, MinInterval) {
PeriodicFunction periodic_function(
[]() { Env::Default()->SleepForMicroseconds(20 * 1000); }, 0);
}
class PeriodicFunctionWithFakeClockEnvTest : public ::testing::Test {
protected:
const int64_t kPeriodMicros = 50;
PeriodicFunctionWithFakeClockEnvTest()
: fake_clock_env_(Env::Default()),
counter_(0),
pf_(
[this]() {
mutex_lock l(counter_mu_);
++counter_;
},
kPeriodMicros, GetPeriodicFunctionOptions()) {}
PeriodicFunction::Options GetPeriodicFunctionOptions() {
PeriodicFunction::Options options;
options.thread_name_prefix = "ignore";
options.env = &fake_clock_env_;
return options;
}
void SetUp() override {
ASSERT_TRUE(AwaitCount(1));
}
void TearDown() override {
StopPeriodicFunction(&pf_, &fake_clock_env_, kPeriodMicros);
}
bool AwaitCount(int expected_counter) {
fake_clock_env_.BlockUntilThreadsAsleep(1);
{
mutex_lock lock(counter_mu_);
return counter_ == expected_counter;
}
}
FakeClockEnv fake_clock_env_;
mutex counter_mu_;
int counter_;
PeriodicFunction pf_;
};
TEST_F(PeriodicFunctionWithFakeClockEnvTest, FasterThanRealTime) {
fake_clock_env_.AdvanceByMicroseconds(kPeriodMicros / 2);
for (int i = 2; i < 7; ++i) {
fake_clock_env_.AdvanceByMicroseconds(
kPeriodMicros);
EXPECT_TRUE(AwaitCount(i));
}
}
TEST_F(PeriodicFunctionWithFakeClockEnvTest, SlowerThanRealTime) {
Env::Default()->SleepForMicroseconds(
125 * 1000);
EXPECT_TRUE(AwaitCount(1));
}
TEST(PeriodicFunctionDeathTest, BadInterval) {
EXPECT_DEBUG_DEATH(PeriodicFunction periodic_function([]() {}, -1),
".* should be >= 0");
EXPECT_DEBUG_DEATH(PeriodicFunction periodic_function(
[]() {}, -1, PeriodicFunction::Options()),
".* should be >= 0");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/periodic_function.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/periodic_function_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b9e0a1bd-6fce-4357-8434-7921a81b9262 | cpp | tensorflow/tensorflow | refcounting_hash_map | third_party/xla/xla/refcounting_hash_map.h | third_party/xla/xla/refcounting_hash_map_test.cc | #ifndef XLA_REFCOUNTING_HASH_MAP_H_
#define XLA_REFCOUNTING_HASH_MAP_H_
#include <functional>
#include <memory>
#include "absl/base/thread_annotations.h"
#include "absl/container/node_hash_map.h"
#include "absl/functional/function_ref.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
namespace xla {
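// Thread-safe map from K to std::shared_ptr<V> that holds only weak
// references internally: a value is created on first request, and its entry
// is erased automatically once the last external shared_ptr is destroyed.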
template <typename K, typename V>
class RefcountingHashMap {
public:
RefcountingHashMap() = default;
RefcountingHashMap(const RefcountingHashMap&) = delete;
RefcountingHashMap(RefcountingHashMap&&) = delete;
RefcountingHashMap& operator=(const RefcountingHashMap&) = delete;
RefcountingHashMap& operator=(RefcountingHashMap&&) = delete;
std::shared_ptr<V> GetOrCreateIfAbsent(
const K& key,
absl::FunctionRef<std::unique_ptr<V>(const K&)> value_factory) {
absl::MutexLock lock(&mu_);
auto it = map_.find(key);
if (it != map_.end()) {
if (std::shared_ptr<V> value = it->second.lock()) {
return value;
}
}
it = map_.emplace(key, std::weak_ptr<V>()).first;
std::shared_ptr<V> value(value_factory(key).release(),
Deleter{it->first, *this});
it->second = value;
return value;
}
private:
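// Deleter installed on every handed-out shared_ptr; it erases the map entry
// once the value dies, unless the slot has been repopulated in the meantime.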
struct Deleter {
const K& key;
RefcountingHashMap& parent;
void operator()(V* v) {
delete v;
absl::MutexLock lock(&parent.mu_);
auto it = parent.map_.find(key);
if (it != parent.map_.end() && it->second.expired()) {
parent.map_.erase(it);
}
}
};
absl::Mutex mu_;
absl::node_hash_map<K, std::weak_ptr<V>> map_ ABSL_GUARDED_BY(mu_);
};
}
#endif | #include "xla/refcounting_hash_map.h"
#include <functional>
#include <memory>
#include <utility>
#include "xla/test.h"
namespace xla {
namespace {
struct DeleteNotifier {
DeleteNotifier() = default;
DeleteNotifier(const DeleteNotifier&) = delete;
DeleteNotifier& operator=(const DeleteNotifier&) = delete;
DeleteNotifier(DeleteNotifier&& o) noexcept : fn(std::move(o.fn)) {
o.fn = nullptr;
}
DeleteNotifier& operator=(DeleteNotifier&& o) noexcept {
fn = o.fn;
o.fn = nullptr;
return *this;
}
~DeleteNotifier() {
if (fn) {
fn();
}
}
std::function<void()> fn;
};
TEST(RefcountingHashMapTest, PointerIdentity) {
RefcountingHashMap<int, int> m;
auto factory = [](const int) { return std::make_unique<int>(); };
std::shared_ptr<int> a = m.GetOrCreateIfAbsent(0, factory);
std::shared_ptr<int> b = m.GetOrCreateIfAbsent(0, factory);
std::shared_ptr<int> c = m.GetOrCreateIfAbsent(1, factory);
EXPECT_EQ(a.get(), b.get());
EXPECT_NE(a.get(), c.get());
}
TEST(RefcountingHashMapTest, DefaultInitialized) {
RefcountingHashMap<int, int> m;
auto factory = [](const int) { return std::make_unique<int>(); };
EXPECT_EQ(*m.GetOrCreateIfAbsent(42, factory), 0);
}
TEST(RefcountingHashMapTest, DeletesEagerly) {
RefcountingHashMap<int, DeleteNotifier> m;
bool deleted = false;
auto factory = [](const int) { return std::make_unique<DeleteNotifier>(); };
auto handle = m.GetOrCreateIfAbsent(0, factory);
handle->fn = [&] { deleted = true; };
EXPECT_FALSE(deleted);
handle = nullptr;
EXPECT_TRUE(deleted);
}
TEST(RefcountingHashMapTest, CustomFactory) {
RefcountingHashMap<int, int> m;
auto factory = [](const int x) { return std::make_unique<int>(x + 1); };
EXPECT_EQ(*m.GetOrCreateIfAbsent(0, factory), 1);
EXPECT_EQ(*m.GetOrCreateIfAbsent(100, factory), 101);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/refcounting_hash_map.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/refcounting_hash_map_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f28e4228-9163-4126-9831-6df089c74fdf | cpp | tensorflow/tensorflow | list_set_item | tensorflow/lite/kernels/variants/list_kernels/list_set_item.cc | tensorflow/lite/kernels/variants/list_kernels/list_set_item_test.cc | #include <utility>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace variants {
namespace ops {
namespace {
constexpr int kListInputIdx = 0;
constexpr int kIndexInputIdx = 1;
constexpr int kListOutputIdx = 0;
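// Prepare/Eval below are shared between two ops via a "semantic" policy:
// ListSetItem reads an explicit int32 index input, while ListPushBack
// appends at the list's current element count.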
class SetItemSemantic {
public:
SetItemSemantic(TfLiteContext* ctx, TfLiteNode* node)
: ctx_(ctx), node_(node) {}
static constexpr int kItemInputIdx = 2;
TfLiteStatus CheckIndexInput() const {
const TfLiteTensor* index_input;
TF_LITE_ENSURE_OK(ctx_,
GetInputSafe(ctx_, node_, kIndexInputIdx, &index_input));
TF_LITE_ENSURE_TYPES_EQ(ctx_, index_input->type, kTfLiteInt32);
return kTfLiteOk;
}
TfLiteStatus GetIndexVal(const TensorArray& arr, int& result) const {
const TfLiteTensor* index_input;
TF_LITE_ENSURE_OK(ctx_,
GetInputSafe(ctx_, node_, kIndexInputIdx, &index_input));
TF_LITE_ENSURE_EQ(ctx_, index_input->bytes, sizeof(int));
const int* index_data = GetTensorData<int>(index_input);
TF_LITE_ENSURE(ctx_, index_data != nullptr);
const int index = *index_data;
TF_LITE_ENSURE(ctx_, index >= 0);
result = index;
return kTfLiteOk;
}
private:
TfLiteContext* const ctx_;
TfLiteNode* const node_;
};
class PushBackSemantic {
public:
PushBackSemantic(TfLiteContext* ctx, TfLiteNode* node) {}
static constexpr int kItemInputIdx = 1;
TfLiteStatus CheckIndexInput() const { return kTfLiteOk; }
TfLiteStatus GetIndexVal(const TensorArray& arr, int& result) const {
result = arr.NumElements();
return kTfLiteOk;
}
};
template <class Semantic>
TfLiteStatus Prepare(TfLiteContext* ctx, TfLiteNode* node) {
const auto semantic = Semantic(ctx, node);
const TfLiteTensor* list_input;
TF_LITE_ENSURE_OK(ctx, GetInputSafe(ctx, node, kListInputIdx, &list_input));
TF_LITE_ENSURE_TYPES_EQ(ctx, list_input->type, kTfLiteVariant);
TF_LITE_ENSURE_OK(ctx, semantic.CheckIndexInput());
TfLiteTensor* output;
TF_LITE_ENSURE_OK(ctx, GetOutputSafe(ctx, node, kListOutputIdx, &output));
TF_LITE_ENSURE_TYPES_EQ(ctx, output->type, kTfLiteVariant);
output->allocation_type = kTfLiteVariantObject;
return kTfLiteOk;
}
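// Clones the input list, deep-copies the item tensor, grows the array when
// the target index is past the end, and stores the copy at that index.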
template <class Semantic>
TfLiteStatus Eval(TfLiteContext* ctx, TfLiteNode* node) {
const auto semantic = Semantic(ctx, node);
const TfLiteTensor* list_input;
TF_LITE_ENSURE_OK(ctx, GetInputSafe(ctx, node, kListInputIdx, &list_input));
TF_LITE_ENSURE_EQ(ctx, list_input->allocation_type, kTfLiteVariantObject);
TensorArray* input_arr =
reinterpret_cast<TensorArray*>(list_input->data.data);
int index;
TF_LITE_ENSURE_OK(ctx, semantic.GetIndexVal(*input_arr, index));
const TfLiteTensor* item_input;
TF_LITE_ENSURE_OK(
ctx, GetInputSafe(ctx, node, semantic.kItemInputIdx, &item_input));
TF_LITE_ENSURE_TYPES_EQ(ctx, input_arr->ElementType(), item_input->type);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(ctx, GetOutputSafe(ctx, node, kListOutputIdx, &output));
TensorArray* output_arr = static_cast<TensorArray*>(
input_arr->CloneTo(static_cast<VariantData*>(output->data.data)));
TensorUniquePtr item_copy = BuildTfLiteTensor(
item_input->type, BuildTfLiteArray(*item_input->dims), kTfLiteDynamic);
TfLiteTensorCopy(item_input, item_copy.get());
if (index >= output_arr->NumElements()) {
output_arr->Resize(index + 1);
}
TF_LITE_ENSURE(ctx, output_arr->Set(index, std::move(item_copy)));
output->data.data = static_cast<VariantData*>(output_arr);
return kTfLiteOk;
}
}
TfLiteRegistration* Register_LIST_SET_ITEM() {
static TfLiteRegistration r = {nullptr, nullptr, Prepare<SetItemSemantic>,
Eval<SetItemSemantic>};
return &r;
}
TfLiteRegistration* Register_LIST_PUSH_BACK() {
static TfLiteRegistration r = {nullptr, nullptr, Prepare<PushBackSemantic>,
Eval<PushBackSemantic>};
return &r;
}
}
}
} | #include <cstddef>
#include <cstring>
#include <optional>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_lib.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
#include "tensorflow/lite/portable_type_to_tflitetype.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace variants {
namespace ops {
namespace {
using ::testing::AllOf;
template <typename T>
class SetItemWithTypeTest : public ::testing::Test {};
class ListSetItemModel : public ListOpModel {
public:
explicit ListSetItemModel(TensorData item_data) {
list_input_ = AddInput({TensorType_VARIANT, {}});
index_input_ = AddInput({TensorType_INT32, {1}});
tensor_input_ = AddInput(item_data);
list_output_ = AddOutput({TensorType_VARIANT, {}});
SetCustomOp("ListSetItem", {}, Register_LIST_SET_ITEM);
BuildInterpreter({{}, {1}, item_data.shape});
interpreter_->input_tensor(0)->allocation_type = kTfLiteVariantObject;
}
const TensorArray* GetOutputTensorArray(int tensor_id) {
TfLiteTensor* tensor = interpreter_->tensor(tensor_id);
TFLITE_CHECK(tensor != nullptr && tensor->type == kTfLiteVariant &&
tensor->allocation_type == kTfLiteVariantObject);
return static_cast<const TensorArray*>(
static_cast<const VariantData*>(tensor->data.data));
}
int index_input_;
int list_input_;
int tensor_input_;
int list_output_;
};
constexpr int kNumElements = 4;
TYPED_TEST_SUITE_P(SetItemWithTypeTest);
TYPED_TEST_P(SetItemWithTypeTest, SetItemOnEmptyTensorList_ListShapeDefined) {
TfLiteType tfl_type = typeToTfLiteType<TypeParam>();
std::optional<TensorType> tensor_type = TflToTensorType(tfl_type);
ASSERT_TRUE(tensor_type.has_value());
ListSetItemModel m({tensor_type.value(), {2, 2}});
m.PopulateTensor(m.index_input_, {0});
m.PopulateListTensor(m.list_input_, {2, 2}, kNumElements, tfl_type);
m.PopulateTensor<TypeParam>(m.tensor_input_, {0, 0, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* arr = m.GetOutputTensorArray(m.list_output_);
ASSERT_EQ(arr->NumElements(), kNumElements);
ASSERT_EQ(arr->ElementType(), tfl_type);
for (int i = 1; i < arr->NumElements(); ++i) {
EXPECT_EQ(arr->At(i), nullptr);
}
EXPECT_THAT(arr->At(0), AllOf(IsAllocatedAs(tfl_type), DimsAre({2, 2}),
FilledWith(static_cast<TypeParam>(0))));
}
TYPED_TEST_P(SetItemWithTypeTest, SetItemOnEmptyTensorList_ListShapeUnranked) {
TfLiteType tfl_type = typeToTfLiteType<TypeParam>();
std::optional<TensorType> tensor_type = TflToTensorType(tfl_type);
ASSERT_TRUE(tensor_type.has_value());
ListSetItemModel m({tensor_type.value(), {2, 2}});
m.PopulateTensor(m.index_input_, {0});
m.PopulateListTensor(m.list_input_, {}, kNumElements, tfl_type);
m.PopulateTensor<TypeParam>(m.tensor_input_, {0, 0, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* arr = m.GetOutputTensorArray(m.list_output_);
ASSERT_EQ(arr->NumElements(), kNumElements);
ASSERT_EQ(arr->ElementType(), tfl_type);
for (int i = 1; i < arr->NumElements(); ++i) {
EXPECT_EQ(arr->At(i), nullptr);
}
EXPECT_THAT(arr->At(0), AllOf(IsAllocatedAs(tfl_type), DimsAre({2, 2}),
FilledWith(static_cast<TypeParam>(0))));
}
TYPED_TEST_P(SetItemWithTypeTest, OverwriteSetItem_ItemsSameShape) {
TfLiteType tfl_type = typeToTfLiteType<TypeParam>();
std::optional<TensorType> tensor_type = TflToTensorType(tfl_type);
ASSERT_TRUE(tensor_type.has_value());
ListSetItemModel m({tensor_type.value(), {2, 2}});
m.PopulateTensor(m.index_input_, {0});
m.PopulateListTensor(m.list_input_, {}, kNumElements, tfl_type);
TypeParam init_item_data[4] = {1, 1, 1, 1};
m.ListSetItem(m.list_input_, 0, {2, 2}, tfl_type, init_item_data);
m.PopulateTensor<TypeParam>(m.tensor_input_, {0, 0, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* arr = m.GetOutputTensorArray(m.list_output_);
ASSERT_EQ(arr->NumElements(), kNumElements);
ASSERT_EQ(arr->ElementType(), tfl_type);
for (int i = 1; i < arr->NumElements(); ++i) {
EXPECT_EQ(arr->At(i), nullptr);
}
EXPECT_THAT(arr->At(0), AllOf(IsAllocatedAs(tfl_type), DimsAre({2, 2}),
FilledWith(static_cast<TypeParam>(0))));
}
TYPED_TEST_P(SetItemWithTypeTest,
SetItemOnNonEmptyListAtEmptyIndex_ItemsSameShape) {
TfLiteType tfl_type = typeToTfLiteType<TypeParam>();
std::optional<TensorType> tensor_type = TflToTensorType(tfl_type);
ASSERT_TRUE(tensor_type.has_value());
ListSetItemModel m({tensor_type.value(), {2, 2}});
m.PopulateTensor(m.index_input_, {1});
m.PopulateListTensor(m.list_input_, {}, kNumElements, tfl_type);
TypeParam init_item_data[4] = {1, 1, 1, 1};
m.ListSetItem(m.list_input_, 0, {2, 2}, tfl_type, init_item_data);
m.PopulateTensor<TypeParam>(m.tensor_input_, {0, 0, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* arr = m.GetOutputTensorArray(m.list_output_);
ASSERT_EQ(arr->NumElements(), kNumElements);
ASSERT_EQ(arr->ElementType(), tfl_type);
for (int i = 2; i < arr->NumElements(); ++i) {
EXPECT_EQ(arr->At(i), nullptr);
}
EXPECT_THAT(arr->At(0), AllOf(IsAllocatedAs(tfl_type), DimsAre({2, 2}),
FilledWith(static_cast<TypeParam>(1))));
EXPECT_THAT(arr->At(1), AllOf(IsAllocatedAs(tfl_type), DimsAre({2, 2}),
FilledWith(static_cast<TypeParam>(0))));
}
TYPED_TEST_P(SetItemWithTypeTest, OverwriteSetItem_ItemsDifferentShape) {
TfLiteType tfl_type = typeToTfLiteType<TypeParam>();
std::optional<TensorType> tensor_type = TflToTensorType(tfl_type);
ASSERT_TRUE(tensor_type.has_value());
ListSetItemModel m({tensor_type.value(), {2}});
m.PopulateTensor(m.index_input_, {0});
m.PopulateListTensor(m.list_input_, {}, kNumElements, tfl_type);
TypeParam init_item_data[4] = {1, 1, 1, 1};
m.ListSetItem(m.list_input_, 0, {2, 2}, tfl_type, init_item_data);
m.PopulateTensor<TypeParam>(m.tensor_input_, {0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* arr = m.GetOutputTensorArray(m.list_output_);
ASSERT_EQ(arr->NumElements(), kNumElements);
ASSERT_EQ(arr->ElementType(), tfl_type);
for (int i = 1; i < arr->NumElements(); ++i) {
EXPECT_EQ(arr->At(i), nullptr);
}
EXPECT_THAT(arr->At(0), AllOf(IsAllocatedAs(tfl_type), DimsAre({2}),
FilledWith(static_cast<TypeParam>(0))));
}
REGISTER_TYPED_TEST_SUITE_P(SetItemWithTypeTest,
SetItemOnEmptyTensorList_ListShapeDefined,
SetItemOnEmptyTensorList_ListShapeUnranked,
OverwriteSetItem_ItemsSameShape,
SetItemOnNonEmptyListAtEmptyIndex_ItemsSameShape,
OverwriteSetItem_ItemsDifferentShape);
using ValidTypes = ::testing::Types<int, int64_t, bool, float>;
INSTANTIATE_TYPED_TEST_SUITE_P(SetItemTests, SetItemWithTypeTest, ValidTypes);
TEST(ListSetItemTest, ItemNotSameTypeAsList_Fails) {
ListSetItemModel m{{TensorType_INT32, {2, 2}}};
m.PopulateTensor(m.index_input_, {0});
m.PopulateListTensor(m.list_input_, {}, kNumElements, kTfLiteInt64);
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TEST(ListSetItemTest, IndexLessThanZero_Fails) {
ListSetItemModel m{{TensorType_INT32, {2, 2}}};
m.PopulateTensor(m.index_input_, {-1});
m.PopulateListTensor(m.list_input_, {}, kNumElements, kTfLiteInt32);
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TEST(ListSetItemTest, IndexGreaterThanListLen_ResizesList) {
ListSetItemModel m{{TensorType_INT32, {2, 2}}};
m.PopulateTensor(m.index_input_, {2});
m.PopulateListTensor(m.list_input_, {}, 2, kTfLiteInt32);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* arr = m.GetOutputTensorArray(m.list_output_);
ASSERT_EQ(arr->NumElements(), 3);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_set_item.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_set_item_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
807f7079-32eb-4aab-bf79-500ab8f1a70c | cpp | google/tensorstore | gzip_compressor | tensorstore/driver/n5/gzip_compressor.cc | tensorstore/driver/n5/gzip_compressor_test.cc | #include "tensorstore/driver/n5/compressor.h"
#include "tensorstore/driver/n5/compressor_registry.h"
#include "tensorstore/internal/compression/zlib_compressor.h"
#include "tensorstore/internal/json_binding/json_binding.h"
namespace tensorstore {
namespace internal_n5 {
namespace {
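// Registers the "gzip" compressor for the N5 driver at static-init time.
// "level" defaults to -1 (zlib's default) with range [-1, 9]; "useZlib"
// defaults to false and is stored inverted as use_gzip_header, so gzip
// framing is the default.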
struct Registration {
Registration() {
using internal::ZlibCompressor;
namespace jb = tensorstore::internal_json_binding;
RegisterCompressor<ZlibCompressor>(
"gzip",
jb::Object(
jb::Member(
"level",
jb::Projection(
&ZlibCompressor::level,
jb::DefaultValue<jb::kAlwaysIncludeDefaults>(
[](auto* v) { *v = -1; }, jb::Integer<int>(-1, 9)))),
jb::Member(
"useZlib",
jb::Projection(
&ZlibCompressor::use_gzip_header,
jb::GetterSetter(
[](bool use_gzip) { return !use_gzip; },
[](bool& use_gzip, bool use_zlib) {
use_gzip = !use_zlib;
},
jb::DefaultValue<jb::kAlwaysIncludeDefaults>(
[](bool* use_zlib) { *use_zlib = false; }))))));
}
} registration;
}
}
} | #include <cstdint>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/driver/n5/compressor.h"
#include "tensorstore/driver/n5/metadata.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal_n5::Compressor;
using ::tensorstore::internal_n5::DecodeChunk;
using ::tensorstore::internal_n5::N5Metadata;
TEST(GzipCompressionTest, Parse) {
tensorstore::TestJsonBinderRoundTripJsonOnlyInexact<Compressor>({
{{{"type", "gzip"}},
{{"type", "gzip"}, {"level", -1}, {"useZlib", false}}},
{{{"type", "gzip"}, {"level", 3}},
{{"type", "gzip"}, {"level", 3}, {"useZlib", false}}},
{{{"type", "gzip"}, {"useZlib", true}},
{{"type", "gzip"}, {"level", -1}, {"useZlib", true}}},
{
{{"type", "gzip"}, {"level", 3}, {"useZlib", false}},
{{"type", "gzip"}, {"level", 3}, {"useZlib", false}},
},
});
EXPECT_THAT(Compressor::FromJson({{"type", "gzip"}, {"level", "x"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "gzip"}, {"level", -2}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "gzip"}, {"level", 10}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "gzip"}, {"useZlib", "x"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "gzip"}, {"extra", "x"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(GzipCompressionTest, Golden) {
const unsigned char kData[] = {
0x00, 0x00,
0x00, 0x03,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x03,
0x1f, 0x8b, 0x08, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x63, 0x60,
0x64, 0x60, 0x62, 0x60,
0x66, 0x60, 0x61, 0x60,
0x65, 0x60, 0x03, 0x00,
0xaa, 0xea, 0x6d, 0xbf,
0x0c, 0x00, 0x00, 0x00,
};
std::string encoded_data(std::begin(kData), std::end(kData));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto metadata,
N5Metadata::FromJson({{"dimensions", {10, 11, 12}},
{"blockSize", {1, 2, 3}},
{"dataType", "uint16"},
{"compression", {{"type", "gzip"}}}}));
auto array = MakeArray<uint16_t>({{{1, 3, 5}, {2, 4, 6}}});
EXPECT_EQ(array, DecodeChunk(metadata, absl::Cord(encoded_data)));
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto buffer, EncodeChunk(metadata, array));
EXPECT_EQ(array, DecodeChunk(metadata, buffer));
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/n5/gzip_compressor.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/n5/gzip_compressor_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
03256495-5ec0-4ac6-9ce5-6a277dd98fb7 | cpp | google/tensorstore | dimension_permutation | tensorstore/index_space/dimension_permutation.cc | tensorstore/index_space/dimension_permutation_test.cc | #include "tensorstore/index_space/dimension_permutation.h"
#include <algorithm>
#include <numeric>
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
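// Writes the identity permutation for c_order and the reversed identity for
// fortran_order.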
void SetPermutation(ContiguousLayoutOrder order,
span<DimensionIndex> permutation) {
if (order == c_order) {
for (DimensionIndex i = 0; i < permutation.size(); ++i) {
permutation[i] = i;
}
} else {
for (DimensionIndex i = 0; i < permutation.size(); ++i) {
permutation[i] = permutation.size() - 1 - i;
}
}
}
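// A permutation is valid iff rank <= kMaxRank and every value in [0, rank)
// appears exactly once.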
bool IsValidPermutation(span<const DimensionIndex> permutation) {
DimensionSet seen_dims;
const DimensionIndex rank = permutation.size();
if (rank > kMaxRank) return false;
for (DimensionIndex i = 0; i < rank; ++i) {
DimensionIndex dim = permutation[i];
if (dim < 0 || dim >= rank || seen_dims[dim]) {
return false;
}
seen_dims[dim] = true;
}
return true;
}
bool PermutationMatchesOrder(span<const DimensionIndex> permutation,
ContiguousLayoutOrder order) {
if (order == c_order) {
for (DimensionIndex i = 0; i < permutation.size(); ++i) {
if (permutation[i] != i) return false;
}
} else {
for (DimensionIndex i = 0; i < permutation.size(); ++i) {
if (permutation[i] != permutation.size() - i - 1) return false;
}
}
return true;
}
void InvertPermutation(DimensionIndex rank, const DimensionIndex* perm,
DimensionIndex* inverse_perm) {
assert(IsValidPermutation(span(perm, rank)));
for (DimensionIndex i = 0; i < rank; ++i) {
inverse_perm[perm[i]] = i;
}
}
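// Orders dimensions from slowest- to fastest-varying by sorting on the
// negated magnitude of each byte stride; std::stable_sort preserves the
// original order of dimensions with equal strides.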
void SetPermutationFromStridedLayout(StridedLayoutView<> layout,
span<DimensionIndex> permutation) {
assert(layout.rank() == permutation.size());
std::iota(permutation.begin(), permutation.end(), DimensionIndex(0));
const auto get_effective_byte_stride_nabs = [&](DimensionIndex i) -> Index {
const Index byte_stride = layout.byte_strides()[i];
if (byte_stride > 0) return -byte_stride;
return byte_stride;
};
std::stable_sort(permutation.begin(), permutation.end(),
[&](DimensionIndex a, DimensionIndex b) {
return get_effective_byte_stride_nabs(a) <
get_effective_byte_stride_nabs(b);
});
}
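// Maps an output-dimension order onto the input dimensions: each input
// dimension is ranked by the earliest position (under output_perm) of an
// output dimension that targets it via a single_input_dimension map;
// untargeted input dimensions sort last in ascending order.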
void TransformOutputDimensionOrder(IndexTransformView<> transform,
span<const DimensionIndex> output_perm,
span<DimensionIndex> input_perm) {
assert(transform.valid());
assert(IsValidPermutation(output_perm));
const DimensionIndex output_rank = transform.output_rank();
const DimensionIndex input_rank = transform.input_rank();
assert(input_rank == input_perm.size());
assert(output_rank == output_perm.size());
DimensionIndex min_output_dim[kMaxRank];
std::fill_n(min_output_dim, input_rank, kMaxRank);
for (DimensionIndex orig_perm_i = 0; orig_perm_i < output_rank;
++orig_perm_i) {
const DimensionIndex output_dim = output_perm[orig_perm_i];
const auto map = transform.output_index_maps()[output_dim];
if (map.method() != OutputIndexMethod::single_input_dimension) continue;
const DimensionIndex input_dim = map.input_dimension();
min_output_dim[input_dim] =
std::min(min_output_dim[input_dim], orig_perm_i);
}
std::iota(input_perm.begin(), input_perm.end(), DimensionIndex(0));
std::sort(input_perm.begin(), input_perm.end(),
[&](DimensionIndex a, DimensionIndex b) {
DimensionIndex a_ordinal = min_output_dim[a];
DimensionIndex b_ordinal = min_output_dim[b];
if (a_ordinal != b_ordinal) return a_ordinal < b_ordinal;
return a < b;
});
assert(IsValidPermutation(input_perm));
}
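// The inverse direction of TransformOutputDimensionOrder: orders output
// dimensions by the position (under `input_perm`) of the input dimension each
// one maps to; outputs that are not single_input_dimension maps sort last.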
void TransformInputDimensionOrder(IndexTransformView<> transform,
span<const DimensionIndex> input_perm,
span<DimensionIndex> output_perm) {
assert(transform.valid());
assert(IsValidPermutation(input_perm));
[[maybe_unused]] const DimensionIndex output_rank = transform.output_rank();
const DimensionIndex input_rank = transform.input_rank();
assert(input_rank == input_perm.size());
assert(output_rank == output_perm.size());
DimensionIndex inverse_input_perm[kMaxRank];
InvertPermutation(input_rank, input_perm.data(), inverse_input_perm);
std::iota(output_perm.begin(), output_perm.end(), DimensionIndex(0));
const auto get_output_dim_ordinal = [&](DimensionIndex output_dim) {
const auto map = transform.output_index_maps()[output_dim];
if (map.method() != OutputIndexMethod::single_input_dimension) {
return kMaxRank;
}
return inverse_input_perm[map.input_dimension()];
};
std::sort(output_perm.begin(), output_perm.end(),
[&](DimensionIndex a, DimensionIndex b) {
DimensionIndex a_ordinal = get_output_dim_ordinal(a);
DimensionIndex b_ordinal = get_output_dim_ordinal(b);
if (a_ordinal != b_ordinal) return a_ordinal < b_ordinal;
return a < b;
});
assert(IsValidPermutation(output_perm));
}
} | #include "tensorstore/index_space/dimension_permutation.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::IsValidPermutation;
using ::tensorstore::PermutationMatchesOrder;
using ::tensorstore::span;
TEST(SetPermutationTest, Rank0) {
std::vector<DimensionIndex> permutation(0);
tensorstore::SetPermutation(tensorstore::c_order, permutation);
tensorstore::SetPermutation(tensorstore::fortran_order, permutation);
}
TEST(SetPermutationTest, Rank1COrder) {
std::vector<DimensionIndex> permutation(1, 42);
tensorstore::SetPermutation(tensorstore::c_order, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0));
}
TEST(SetPermutationTest, Rank1FortranOrder) {
std::vector<DimensionIndex> permutation(1, 42);
tensorstore::SetPermutation(tensorstore::fortran_order, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0));
}
TEST(SetPermutationTest, Rank2COrder) {
std::vector<DimensionIndex> permutation(2, 42);
tensorstore::SetPermutation(tensorstore::c_order, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0, 1));
}
TEST(SetPermutationTest, Rank2FortranOrder) {
std::vector<DimensionIndex> permutation(2, 42);
tensorstore::SetPermutation(tensorstore::fortran_order, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(1, 0));
}
TEST(SetPermutationTest, Rank3COrder) {
std::vector<DimensionIndex> permutation(3, 42);
tensorstore::SetPermutation(tensorstore::c_order, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0, 1, 2));
}
TEST(SetPermutationTest, Rank3FortranOrder) {
std::vector<DimensionIndex> permutation(3, 42);
tensorstore::SetPermutation(tensorstore::fortran_order, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(2, 1, 0));
}
TEST(IsValidPermutationTest, Basic) {
EXPECT_TRUE(IsValidPermutation(span<const DimensionIndex>()));
EXPECT_TRUE(IsValidPermutation(span<const DimensionIndex>({0})));
EXPECT_FALSE(IsValidPermutation(span<const DimensionIndex>({1})));
EXPECT_FALSE(IsValidPermutation(span<const DimensionIndex>({-1})));
EXPECT_TRUE(IsValidPermutation(span<const DimensionIndex>({0, 1})));
EXPECT_TRUE(IsValidPermutation(span<const DimensionIndex>({1, 0})));
EXPECT_FALSE(IsValidPermutation(span<const DimensionIndex>({1, 1})));
EXPECT_FALSE(IsValidPermutation(span<const DimensionIndex>({0, 0})));
EXPECT_TRUE(IsValidPermutation(span<const DimensionIndex>({1, 2, 0})));
EXPECT_FALSE(IsValidPermutation(span<const DimensionIndex>({1, 2, 1})));
}
TEST(PermutationMatchesOrderTest, Basic) {
EXPECT_TRUE(PermutationMatchesOrder({}, tensorstore::c_order));
EXPECT_TRUE(PermutationMatchesOrder({}, tensorstore::fortran_order));
EXPECT_TRUE(PermutationMatchesOrder({{0}}, tensorstore::c_order));
EXPECT_TRUE(PermutationMatchesOrder({{0}}, tensorstore::fortran_order));
EXPECT_TRUE(PermutationMatchesOrder({{0, 1}}, tensorstore::c_order));
EXPECT_FALSE(PermutationMatchesOrder({{0, 1}}, tensorstore::fortran_order));
EXPECT_TRUE(PermutationMatchesOrder({{0, 1, 2}}, tensorstore::c_order));
EXPECT_FALSE(PermutationMatchesOrder({{1}}, tensorstore::c_order));
EXPECT_FALSE(PermutationMatchesOrder({{1}}, tensorstore::fortran_order));
EXPECT_FALSE(PermutationMatchesOrder({{1, 0}}, tensorstore::c_order));
EXPECT_TRUE(PermutationMatchesOrder({{1, 0}}, tensorstore::fortran_order));
}
TEST(InvertPermutationTest, Rank0) {
std::vector<DimensionIndex> source;
std::vector<DimensionIndex> dest;
tensorstore::InvertPermutation(0, source.data(), dest.data());
}
TEST(InvertPermutationTest, Rank1) {
std::vector<DimensionIndex> source{0};
std::vector<DimensionIndex> dest(1, 42);
tensorstore::InvertPermutation(1, source.data(), dest.data());
EXPECT_THAT(dest, ::testing::ElementsAre(0));
}
TEST(InvertPermutationTest, Rank2Identity) {
std::vector<DimensionIndex> source{0, 1};
std::vector<DimensionIndex> dest(2, 42);
tensorstore::InvertPermutation(2, source.data(), dest.data());
EXPECT_THAT(dest, ::testing::ElementsAre(0, 1));
}
TEST(InvertPermutationTest, Rank2Transpose) {
std::vector<DimensionIndex> source{1, 0};
std::vector<DimensionIndex> dest(2, 42);
tensorstore::InvertPermutation(2, source.data(), dest.data());
EXPECT_THAT(dest, ::testing::ElementsAre(1, 0));
}
TEST(InvertPermutationTest, Rank3) {
std::vector<DimensionIndex> source{1, 2, 0};
std::vector<DimensionIndex> dest(3, 42);
tensorstore::InvertPermutation(3, source.data(), dest.data());
EXPECT_THAT(dest, ::testing::ElementsAre(2, 0, 1));
std::vector<DimensionIndex> source2(3, 42);
tensorstore::InvertPermutation(3, dest.data(), source2.data());
EXPECT_EQ(source, source2);
}
TEST(SetPermutationFromStridedLayoutTest, Rank0) {
tensorstore::StridedLayout<> layout(0);
std::vector<DimensionIndex> permutation(0);
tensorstore::SetPermutationFromStridedLayout(layout, permutation);
}
TEST(SetPermutationFromStridedLayoutTest, Rank1) {
tensorstore::StridedLayout<> layout({5}, {10});
std::vector<DimensionIndex> permutation(1, 42);
tensorstore::SetPermutationFromStridedLayout(layout, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0));
}
TEST(SetPermutationFromStridedLayoutTest, Rank2COrder) {
tensorstore::StridedLayout<> layout({5, 6}, {10, 5});
std::vector<DimensionIndex> permutation(2, 42);
tensorstore::SetPermutationFromStridedLayout(layout, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0, 1));
}
TEST(SetPermutationFromStridedLayoutTest, Rank2FortranOrder) {
tensorstore::StridedLayout<> layout({5, 6}, {5, 10});
std::vector<DimensionIndex> permutation(2, 42);
tensorstore::SetPermutationFromStridedLayout(layout, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(1, 0));
}
TEST(SetPermutationFromStridedLayoutTest, Rank2ZeroStride) {
tensorstore::StridedLayout<> layout({5, 6}, {0, 0});
std::vector<DimensionIndex> permutation(2, 42);
tensorstore::SetPermutationFromStridedLayout(layout, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0, 1));
}
TEST(SetPermutationFromStridedLayoutTest, Rank4) {
tensorstore::StridedLayout<> layout({5, 6, 7, 8}, {10, 5, 6, 6});
std::vector<DimensionIndex> permutation(4, 42);
tensorstore::SetPermutationFromStridedLayout(layout, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0, 2, 3, 1));
}
TEST(TransformOutputDimensionOrderTest, Rank0) {
std::vector<DimensionIndex> source;
std::vector<DimensionIndex> dest;
tensorstore::TransformOutputDimensionOrder(tensorstore::IdentityTransform(0),
source, dest);
}
TEST(TransformOutputDimensionOrderTest, Rank1Identity) {
std::vector<DimensionIndex> source{0};
std::vector<DimensionIndex> dest(1, 42);
tensorstore::TransformOutputDimensionOrder(tensorstore::IdentityTransform(1),
source, dest);
EXPECT_THAT(dest, ::testing::ElementsAre(0));
}
TEST(TransformOutputDimensionOrderTest, Rank2COrderIdentity) {
std::vector<DimensionIndex> source{0, 1};
std::vector<DimensionIndex> dest(2, 42);
std::vector<DimensionIndex> source2(2, 42);
auto transform = tensorstore::IdentityTransform(2);
tensorstore::TransformOutputDimensionOrder(transform, source, dest);
EXPECT_THAT(dest, ::testing::ElementsAre(0, 1));
tensorstore::TransformInputDimensionOrder(transform, dest, source2);
EXPECT_EQ(source, source2);
}
TEST(TransformOutputDimensionOrderTest, Rank2FortranOrderIdentity) {
std::vector<DimensionIndex> source{1, 0};
std::vector<DimensionIndex> dest(2, 42);
std::vector<DimensionIndex> source2(2, 42);
auto transform = tensorstore::IdentityTransform(2);
tensorstore::TransformOutputDimensionOrder(transform, source, dest);
EXPECT_THAT(dest, ::testing::ElementsAre(1, 0));
tensorstore::TransformInputDimensionOrder(transform, dest, source2);
EXPECT_EQ(source, source2);
}
TEST(TransformOutputDimensionOrderTest, Rank2COrderTranspose) {
std::vector<DimensionIndex> source{0, 1};
std::vector<DimensionIndex> dest(2, 42);
std::vector<DimensionIndex> source2(2, 42);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
tensorstore::IdentityTransform(2) | Dims(1, 0).Transpose());
tensorstore::TransformOutputDimensionOrder(transform, source, dest);
EXPECT_THAT(dest, ::testing::ElementsAre(1, 0));
tensorstore::TransformInputDimensionOrder(transform, dest, source2);
EXPECT_EQ(source, source2);
}
TEST(TransformOutputDimensionOrderTest, Rank2FortranOrderTranspose) {
std::vector<DimensionIndex> source{1, 0};
std::vector<DimensionIndex> dest(2, 42);
std::vector<DimensionIndex> source2(2, 42);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
tensorstore::IdentityTransform(2) | Dims(1, 0).Transpose());
tensorstore::TransformOutputDimensionOrder(transform, source, dest);
EXPECT_THAT(dest, ::testing::ElementsAre(0, 1));
tensorstore::TransformInputDimensionOrder(transform, dest, source2);
EXPECT_EQ(source, source2);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/dimension_permutation.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/dimension_permutation_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
5ab2c70a-fff9-43d8-8884-621f1dde07fe | cpp | google/tsl | compute_engine_zone_provider | tsl/platform/cloud/compute_engine_zone_provider.cc | tsl/platform/cloud/compute_engine_zone_provider_test.cc | #include "tsl/platform/cloud/compute_engine_zone_provider.h"
#include <utility>
#include "tsl/platform/str_util.h"
namespace tsl {
namespace {
constexpr char kGceMetadataZonePath[] = "instance/zone";
}
ComputeEngineZoneProvider::ComputeEngineZoneProvider(
std::shared_ptr<ComputeEngineMetadataClient> google_metadata_client)
: google_metadata_client_(std::move(google_metadata_client)) {}
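// Returns the zone of the current instance (e.g. "us-west1-b"), caching the
// first successful lookup. The metadata server replies with a location of the
// form "projects/<numeric-id>/zones/<zone>", which splits into exactly four
// '/'-separated elements; any other shape is logged and the zone left unset.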
absl::Status ComputeEngineZoneProvider::GetZone(string* zone) {
if (!cached_zone.empty()) {
*zone = cached_zone;
return absl::OkStatus();
}
std::vector<char> response_buffer;
TF_RETURN_IF_ERROR(google_metadata_client_->GetMetadata(kGceMetadataZonePath,
&response_buffer));
absl::string_view location(&response_buffer[0], response_buffer.size());
std::vector<string> elems = str_util::Split(location, "/");
if (elems.size() == 4) {
cached_zone = elems.back();
*zone = cached_zone;
} else {
LOG(ERROR) << "Failed to parse the zone name from location: "
<< string(location);
}
return absl::OkStatus();
}
ComputeEngineZoneProvider::~ComputeEngineZoneProvider() {}
} | #include "tsl/platform/cloud/compute_engine_zone_provider.h"
#include "tsl/platform/cloud/http_request_fake.h"
#include "tsl/platform/test.h"
namespace tsl {
class ComputeEngineZoneProviderTest : public ::testing::Test {
protected:
void SetUp() override {}
void TearDown() override {}
};
TEST_F(ComputeEngineZoneProviderTest, GetZone) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: http:
"Header Metadata-Flavor: Google\n",
"projects/123456789/zones/us-west1-b")});
auto httpRequestFactory = std::make_shared<FakeHttpRequestFactory>(&requests);
auto metadata_client = std::make_shared<ComputeEngineMetadataClient>(
      httpRequestFactory, RetryConfig(0));
ComputeEngineZoneProvider provider(metadata_client);
string zone;
TF_EXPECT_OK(provider.GetZone(&zone));
EXPECT_EQ("us-west1-b", zone);
TF_EXPECT_OK(provider.GetZone(&zone));
}
TEST_F(ComputeEngineZoneProviderTest, InvalidZoneString) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: http:
"Header Metadata-Flavor: Google\n",
"invalidresponse")});
auto httpRequestFactory = std::make_shared<FakeHttpRequestFactory>(&requests);
auto metadata_client = std::make_shared<ComputeEngineMetadataClient>(
      httpRequestFactory, RetryConfig(0));
ComputeEngineZoneProvider provider(metadata_client);
string zone;
TF_EXPECT_OK(provider.GetZone(&zone));
EXPECT_EQ("", zone);
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/cloud/compute_engine_zone_provider.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/cloud/compute_engine_zone_provider_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
a5a37f0d-aeff-4608-bde1-db87689d2a23 | cpp | tensorflow/tensorflow | stacktrace | third_party/xla/third_party/tsl/tsl/platform/windows/stacktrace.cc | third_party/xla/third_party/tsl/tsl/platform/stacktrace_test.cc | #include "tsl/platform/windows/stacktrace.h"
#include <windows.h>
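// dbghelp.h must be included after windows.h.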
#include <dbghelp.h>
#include <cstdio>
#include <string>
#include "tsl/platform/mutex.h"
#pragma comment(lib, "dbghelp.lib")
namespace tsl {
static bool SymbolsAreAvailableInit() {
SymSetOptions(SYMOPT_UNDNAME | SYMOPT_DEFERRED_LOADS);
return SymInitialize(GetCurrentProcess(), NULL, true);
}
static bool SymbolsAreAvailable() {
static bool kSymbolsAvailable = SymbolsAreAvailableInit();
return kSymbolsAvailable;
}
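// Returns a human-readable stack trace of up to 64 frames, one
// "0x<address>\t<symbol>" line per frame. Symbol lookups are serialized
// through a global mutex because the dbghelp.dll functions are documented as
// single-threaded.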
std::string CurrentStackTrace() {
HANDLE current_process = GetCurrentProcess();
static constexpr int kMaxStackFrames = 64;
void* trace[kMaxStackFrames];
int num_frames = CaptureStackBackTrace(0, kMaxStackFrames, trace, NULL);
static mutex mu(tsl::LINKER_INITIALIZED);
std::string stacktrace;
for (int i = 0; i < num_frames; ++i) {
const char* symbol = "(unknown)";
if (SymbolsAreAvailable()) {
char symbol_info_buffer[sizeof(SYMBOL_INFO) +
MAX_SYM_NAME * sizeof(TCHAR)];
SYMBOL_INFO* symbol_ptr =
reinterpret_cast<SYMBOL_INFO*>(symbol_info_buffer);
symbol_ptr->SizeOfStruct = sizeof(SYMBOL_INFO);
symbol_ptr->MaxNameLen = MAX_SYM_NAME;
mutex_lock lock(mu);
if (SymFromAddr(current_process, reinterpret_cast<DWORD64>(trace[i]), 0,
symbol_ptr)) {
symbol = symbol_ptr->Name;
}
}
char buffer[256];
snprintf(buffer, sizeof(buffer), "0x%p\t%s", trace[i], symbol);
stacktrace += buffer;
stacktrace += "\n";
}
return stacktrace;
}
} | #include "tsl/platform/stacktrace.h"
#include <string>
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
#if defined(TF_HAS_STACKTRACE)
TEST(StacktraceTest, StacktraceWorks) {
std::string stacktrace = CurrentStackTrace();
LOG(INFO) << "CurrentStackTrace():\n" << stacktrace;
std::string expected_frame = "testing::internal::UnitTestImpl::RunAllTests";
EXPECT_NE(stacktrace.find(expected_frame), std::string::npos);
}
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/windows/stacktrace.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/stacktrace_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fa647017-9bed-4edd-a8b2-56279563cc52 | cpp | tensorflow/tensorflow | host_offload_legalize | third_party/xla/xla/service/host_offload_legalize.cc | third_party/xla/xla/service/host_offload_legalize_test.cc | #include "xla/service/host_offload_legalize.h"
#include <array>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_value.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
bool IsEntryComputationParameter(HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kParameter &&
instruction->parent()->IsEntryComputation();
}
constexpr std::array<HloOpcode, 2> kUsersOpcodes = {HloOpcode::kSlice,
HloOpcode::kDynamicSlice};
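// Walks up through bitcast/copy/reshape ops (each the sole user of its
// operand) to find the MoveToHost custom-call feeding `instr`; returns
// nullptr if the chain is broken by any other op.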
HloInstruction* FindToHostAnnotationToUpdate(HloInstruction* instr) {
while (!instr->IsCustomCall(
host_memory_offload_annotations::kMoveToHostCustomCallTarget)) {
if ((instr->opcode() != HloOpcode::kBitcast &&
instr->opcode() != HloOpcode::kCopy &&
instr->opcode() != HloOpcode::kReshape) ||
instr->mutable_operand(0)->user_count() != 1) {
return nullptr;
}
instr = instr->mutable_operand(0);
}
return instr;
}
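// Walks down through single-user bitcast/reshape/copy/slice/dynamic-slice
// chains to find the MoveToDevice custom-call consuming `instr`; returns
// nullptr if the chain is broken.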
HloInstruction* FindToDeviceAnnotationToUpdate(HloInstruction* instr) {
while (!instr->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) {
if (instr->user_count() != 1 ||
(instr->opcode() != HloOpcode::kBitcast &&
instr->opcode() != HloOpcode::kReshape &&
instr->opcode() != HloOpcode::kCopy &&
!absl::c_linear_search(kUsersOpcodes, instr->opcode()))) {
return nullptr;
}
instr = instr->users()[0];
}
return instr;
}
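// Follows the single-user chain below a MoveToHost annotation looking for a
// dynamic-update-slice; if the chain ends first, the last instruction reached
// is returned instead.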
HloInstruction* FindDUSFromAnnotation(HloInstruction* instr) {
while (instr->opcode() != HloOpcode::kDynamicUpdateSlice) {
if (instr->user_count() != 1 || (instr->opcode() != HloOpcode::kBitcast &&
instr->opcode() != HloOpcode::kReshape &&
instr->opcode() != HloOpcode::kCopy)) {
break;
}
instr = instr->users()[0];
}
return instr;
}
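// Gives every use of a broadcasted constant its own clone of the broadcast,
// so that each use can later be handled (e.g. offloaded) independently.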
absl::StatusOr<bool> DuplicateBroadcastForEachUse(HloModule* module) {
bool split_at_least_one = false;
for (HloComputation* computation : module->computations()) {
std::vector<HloInstruction*> broadcasts;
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() != HloOpcode::kBroadcast ||
!instruction->HasConstantOperand()) {
continue;
}
broadcasts.push_back(instruction);
}
for (HloInstruction* instruction : broadcasts) {
if (instruction->opcode() != HloOpcode::kBroadcast ||
!instruction->HasConstantOperand()) {
continue;
}
absl::InlinedVector<HloUse, 8> uses;
for (HloInstruction* user : instruction->users()) {
for (int64_t i = 0; i < user->operand_count(); ++i) {
if (user->operand(i) != instruction) {
continue;
}
uses.push_back(HloUse{user, i, {}});
}
}
if (uses.size() <= 1) {
VLOG(5) << "Skipping broadcast " << instruction->ToString()
<< " which has " << uses.size() << " uses";
continue;
}
VLOG(5) << "Splitting broadcast " << instruction->ToString()
<< " which has " << uses.size() << " uses";
split_at_least_one = true;
for (int i = 1; i < uses.size(); ++i) {
const HloUse& use = uses[i];
HloInstruction* new_broadcast =
instruction->parent()->AddInstruction(instruction->Clone());
VLOG(5) << "New broadcast " << new_broadcast->ToString();
TF_RETURN_IF_ERROR(use.instruction->ReplaceOperandWith(
use.operand_number, new_broadcast));
}
}
}
return split_at_least_one;
}
struct InstructionAndIndex {
HloInstruction* instruction;
int index;
InstructionAndIndex(HloInstruction* instruction, int index)
: instruction(instruction), index(index) {}
bool operator==(const InstructionAndIndex& other) const {
return instruction == other.instruction && index == other.index;
}
};
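// Steps one hop up (toward producers) through an offload chain, treating
// gte/tuple/bitcast/reshape/copy/optimization-barrier/while/parameter/
// dynamic-update-slice as pass-throughs. Returns its input position unchanged
// at AllocateBuffer, MoveToHost, or a scalar-constant broadcast (the caller
// detects this fixpoint), and errors on anything else.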
absl::StatusOr<InstructionAndIndex> WalkUpMemoryOffload(
InstructionAndIndex current_value, const CallGraph& call_graph) {
auto& [instruction, index] = current_value;
switch (instruction->opcode()) {
case HloOpcode::kGetTupleElement: {
CHECK_EQ(index, -1);
return InstructionAndIndex(instruction->mutable_operand(0),
instruction->tuple_index());
}
case HloOpcode::kBitcast:
case HloOpcode::kReshape:
case HloOpcode::kCopy: {
return InstructionAndIndex(instruction->mutable_operand(0), index);
}
case HloOpcode::kTuple: {
return InstructionAndIndex(instruction->mutable_operand(index), -1);
}
case HloOpcode::kOptimizationBarrier: {
return InstructionAndIndex(instruction->mutable_operand(0), index);
}
case HloOpcode::kWhile: {
HloComputation* while_body = instruction->while_body();
HloInstruction* root = while_body->root_instruction();
CHECK_EQ(root->opcode(), HloOpcode::kTuple);
return InstructionAndIndex(root, index);
}
case HloOpcode::kParameter: {
CHECK_NE(instruction->parent(),
instruction->GetModule()->entry_computation());
std::vector<HloInstruction*> callers =
call_graph.GetComputationCallers(instruction->parent());
if (callers.size() != 1) {
return absl::InvalidArgumentError(
"Expected to be called only by one caller");
}
HloInstruction* caller = callers[0];
if (caller->opcode() != HloOpcode::kWhile) {
return absl::InvalidArgumentError(
"Expected to be called by a while loop");
}
return InstructionAndIndex(caller->mutable_operand(0), index);
}
case HloOpcode::kDynamicUpdateSlice: {
return InstructionAndIndex(instruction->mutable_operand(0), index);
}
case HloOpcode::kCustomCall: {
if (!instruction->IsCustomCall("AllocateBuffer") &&
!instruction->IsCustomCall(
host_memory_offload_annotations::kMoveToHostCustomCallTarget)) {
return absl::InvalidArgumentError(
"Expected AllocateBuffer or MoveToHost custom-call");
}
return InstructionAndIndex(instruction, index);
}
case HloOpcode::kBroadcast: {
HloInstruction* broadcast_operand = instruction->mutable_operand(0);
if (broadcast_operand->opcode() != HloOpcode::kConstant) {
return absl::InvalidArgumentError("Expected a constant as operand");
}
if (!ShapeUtil::IsEffectiveScalar(broadcast_operand->shape())) {
return absl::InvalidArgumentError("Expected a scalar broadcast");
}
return InstructionAndIndex(instruction, index);
}
default: {
return absl::InvalidArgumentError(
absl::StrFormat("Invalid opcode %s", instruction->ToString()));
}
}
}
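// Collects the positions one hop down (toward users) from `current_value`,
// crossing tuple/get-tuple-element/while/computation boundaries; returns an
// error when an unsupported user would make the walk unsound.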
absl::StatusOr<std::vector<InstructionAndIndex>> WalkDownMemoryOffload(
const InstructionAndIndex& current_value, const CallGraph& call_graph,
bool for_move_copy_phase) {
VLOG(6) << "Getting users of: \"" << current_value.instruction->ToString()
<< "\" at index " << current_value.index;
std::vector<InstructionAndIndex> results;
auto add_gte_for_idx = [&results](HloInstruction* instr,
int idx) -> absl::Status {
HloInstruction* gte = nullptr;
for (HloInstruction* user : instr->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
return absl::InvalidArgumentError(
"Expected users to be only get-tuple-elements");
}
if (user->tuple_index() != idx) {
continue;
}
if (gte != nullptr) {
return absl::InvalidArgumentError(
"Expected to find only one gte per index.");
}
      results.emplace_back(user, -1);
      gte = user;  // Remember the match so a second gte at `idx` errors above.
}
return absl::OkStatus();
};
if (current_value.instruction->user_count() == 0) {
if (current_value.instruction->IsRoot() &&
!current_value.instruction->parent()->IsEntryComputation()) {
std::vector<HloInstruction*> callers =
call_graph.GetComputationCallers(current_value.instruction->parent());
if (callers.size() != 1 || callers[0]->opcode() != HloOpcode::kWhile) {
return absl::InvalidArgumentError(absl::StrFormat(
"Expected computation \"%s\" to be called only by one caller "
"and that caller to be a While. There are %d caller(s): [%s]",
current_value.instruction->parent()->name(), callers.size(),
absl::StrJoin(callers, ", ",
[](std::string* out, const HloInstruction* instr) {
absl::StrAppend(out, instr->name());
})));
}
TF_RETURN_IF_ERROR(add_gte_for_idx(callers[0], current_value.index));
return results;
}
}
if (current_value.instruction->opcode() == HloOpcode::kParameter &&
current_value.instruction->shape().IsTuple()) {
TF_RETURN_IF_ERROR(
add_gte_for_idx(current_value.instruction, current_value.index));
return results;
}
for (HloInstruction* user : current_value.instruction->users()) {
switch (user->opcode()) {
case HloOpcode::kGetTupleElement: {
CHECK_NE(user->tuple_index(), -1);
if (user->tuple_index() != current_value.index) {
continue;
}
results.emplace_back(user, -1);
break;
}
case HloOpcode::kTuple: {
auto output_indices = user->OperandIndices(current_value.instruction);
if (output_indices.size() != 1) {
return absl::InvalidArgumentError(
"Expected operand to be used only once in the tuple.");
}
results.emplace_back(user, output_indices[0]);
break;
}
case HloOpcode::kOptimizationBarrier: {
results.emplace_back(user, current_value.index);
break;
}
case HloOpcode::kWhile: {
HloComputation* while_body = user->while_body();
HloInstruction* parameter = while_body->parameter_instruction(0);
results.emplace_back(parameter, current_value.index);
break;
}
case HloOpcode::kDynamicUpdateSlice: {
if (user->OperandIndices(current_value.instruction)[0] != 0) {
return absl::InvalidArgumentError(
"Expected to be used by first operand of dynamic-update-slice");
}
results.emplace_back(user, current_value.index);
break;
}
case HloOpcode::kCustomCall: {
if (user->IsCustomCall(host_memory_offload_annotations::
kMoveToDeviceCustomCallTarget)) {
results.emplace_back(user, current_value.index);
break;
}
return absl::InvalidArgumentError("Invalid custom-call found.");
}
case HloOpcode::kBitcast:
case HloOpcode::kCopy:
case HloOpcode::kDynamicSlice:
case HloOpcode::kReshape:
case HloOpcode::kSlice: {
results.emplace_back(user, current_value.index);
break;
}
case HloOpcode::kAsyncStart: {
if (user->async_execution_thread() == HloInstruction::kHostThread) {
CHECK(!for_move_copy_phase)
<< "Transpose copy going into host call is not supported yet.";
break;
}
[[fallthrough]];
}
default: {
return absl::InvalidArgumentError(
absl::StrFormat("Unrecognized user name: %s", user->name()));
}
}
}
return results;
}
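// Overwrites the layout of `instruction` (or of its tuple element `index`
// when index != -1). For a while op, the body root and the condition
// parameter are updated as well to keep the loop's shapes consistent.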
void UpdateInstructionLayout(const InstructionAndIndex& instruction_and_index,
const Layout& new_layout) {
HloInstruction* instruction = instruction_and_index.instruction;
const int index = instruction_and_index.index;
VLOG(2) << " Updating " << instruction->name() << "'s layout "
<< instruction->shape().ToString(true) << " at index " << index
<< " to " << new_layout.ToString();
if (index != -1) {
*instruction->mutable_shape()
->mutable_tuple_shapes(index)
->mutable_layout() = new_layout;
} else {
VLOG(5) << " Instruction: " << instruction->ToString();
VLOG(5) << " New layout: " << new_layout.ToString();
*instruction->mutable_shape()->mutable_layout() = new_layout;
}
VLOG(3) << " Shape is now: " << instruction->shape().ToString(true);
if (instruction->opcode() == HloOpcode::kWhile) {
*instruction->while_body()
->root_instruction()
->mutable_shape()
->mutable_tuple_shapes(index)
->mutable_layout() = new_layout;
*instruction->while_condition()
->parameter_instruction(0)
->mutable_shape()
->mutable_tuple_shapes(index)
->mutable_layout() = new_layout;
}
}
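// Drops the physically majormost (slowest-varying) dimension of `shape`.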
Shape RemoveMajormostDimension(const Shape& shape) {
CHECK(shape.has_layout()) << "Shape must have layout.";
const int size = shape.layout().minor_to_major_size();
const int64_t majormost_dim = shape.layout().minor_to_major(size - 1);
return ShapeUtil::DeleteDimension(majormost_dim, shape);
}
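// Sinks a layout-changing copy down the offload chain: every instruction on
// the way is rewritten to the pre-copy layout, and a fresh copy with the
// post-copy layout is re-inserted just after the matching MoveToDevice
// annotation. The original copy is removed at the end.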
absl::Status MoveCopy(
const InstructionAndIndex& copy_to_move_instruction_and_index,
const CallGraph* call_graph,
absl::flat_hash_set<HloInstruction*>& processed_annotations,
absl::flat_hash_set<HloInstruction*>& to_remove) {
HloInstruction* copy_to_move = copy_to_move_instruction_and_index.instruction;
VLOG(5) << "Moving copy: " << copy_to_move->ToString();
struct InstructionAndShapes {
InstructionAndShapes(InstructionAndIndex idx, Shape s_before, Shape s_after)
: instruction_and_index(idx),
shape_before_copy(s_before),
shape_after_copy(s_after) {}
InstructionAndIndex instruction_and_index;
Shape shape_before_copy;
Shape shape_after_copy;
};
std::vector<InstructionAndShapes> stack = {InstructionAndShapes(
copy_to_move_instruction_and_index, copy_to_move->operand(0)->shape(),
copy_to_move->shape())};
while (!stack.empty()) {
InstructionAndShapes current_instruction_and_shapes = stack.back();
InstructionAndIndex current_instruction_and_index =
current_instruction_and_shapes.instruction_and_index;
stack.pop_back();
VLOG(5) << "Current top of stack: "
<< current_instruction_and_index.instruction->ToString()
<< ", index: " << current_instruction_and_index.index;
absl::StatusOr<std::vector<InstructionAndIndex>> current_value_down =
WalkDownMemoryOffload(current_instruction_and_index, *call_graph,
                              /*for_move_copy_phase=*/true);
if (!current_value_down.ok()) {
VLOG(5) << "WalkDownMemoryOffload failed: "
<< current_value_down.status();
break;
}
for (InstructionAndIndex& instruction_and_index :
current_value_down.value()) {
HloInstruction* instruction = instruction_and_index.instruction;
Shape shape_before_copy =
current_instruction_and_shapes.shape_before_copy;
Shape shape_after_copy = current_instruction_and_shapes.shape_after_copy;
VLOG(5) << "Evaluating successor: " << instruction->ToString();
const int index = instruction_and_index.index;
if (instruction->opcode() == HloOpcode::kBitcast) {
const Shape& before_bitcast_shape = instruction->operand(0)->shape();
const Shape& after_bitcast_shape = instruction->shape();
if (!Shape::Equal().IgnoreLayout()(copy_to_move->operand(0)->shape(),
copy_to_move->shape())) {
return absl::InternalError(absl::StrFormat(
"Expecting copy to only change instructions layout. Copy: %s",
copy_to_move->ToString()));
}
if (after_bitcast_shape.rank() != before_bitcast_shape.rank() - 1) {
return absl::InternalError(
absl::StrFormat("Only handling bitcasts which remove 0'th "
"dimension. This bitcast is \"%s\"",
instruction->ToString()));
}
if (!(ShapeUtil::IsEffectivelyMostMajorDimension(before_bitcast_shape,
0) &&
before_bitcast_shape.dimensions(0) == 1)) {
return absl::InternalError(
absl::StrFormat("Only handling bitcasts with majormost dimension "
"of size 1. This bitcast is \"%s\"",
instruction->ToString()));
}
const Shape new_bitcast_shape =
RemoveMajormostDimension(shape_before_copy);
VLOG(2) << absl::StreamFormat(
" Encountered bitcast \"%s\", updating current shape from %s to %s",
instruction->name(), shape_before_copy.ToString(true),
new_bitcast_shape.ToString(true));
shape_before_copy = new_bitcast_shape;
const Shape new_copy_shape = RemoveMajormostDimension(shape_after_copy);
VLOG(2) << absl::StreamFormat(
" Also updating shape after copy from %s to %s",
shape_after_copy.ToString(true), new_copy_shape.ToString(true));
shape_after_copy = new_copy_shape;
} else if (instruction->opcode() == HloOpcode::kSlice ||
instruction->opcode() == HloOpcode::kDynamicSlice) {
Shape new_copy_shape = instruction->shape();
*new_copy_shape.mutable_layout() = shape_after_copy.layout();
VLOG(2) << absl::StreamFormat(
" Encountered %s \"%s\", updating shape after copy from "
"%s to %s",
HloOpcodeString(instruction->opcode()), instruction->name(),
shape_after_copy.ToString(true), new_copy_shape.ToString(true));
shape_after_copy = new_copy_shape;
}
UpdateInstructionLayout(instruction_and_index,
shape_before_copy.layout());
if (instruction->opcode() == HloOpcode::kParameter) {
std::vector<HloInstruction*> callers =
call_graph->GetComputationCallers(instruction->parent());
if (callers.size() != 1) {
return absl::InvalidArgumentError(
"Expected to be called only by one caller");
}
HloInstruction* caller = callers[0];
UpdateInstructionLayout(InstructionAndIndex(caller, index),
shape_before_copy.layout());
}
CHECK_NE(instruction->opcode(), HloOpcode::kCopy)
<< "Copies should be processed in reverse order so this never "
"happens";
if (absl::c_linear_search(kUsersOpcodes, instruction->opcode()) ||
instruction->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) {
HloInstruction* annotation =
FindToDeviceAnnotationToUpdate(instruction);
CHECK_NE(annotation, nullptr)
<< "We already verified we could find an annotation here. "
"Something went wrong.";
HloInstruction* new_annotation = nullptr;
if (instruction->opcode() == HloOpcode::kCustomCall) {
new_annotation = annotation;
} else {
new_annotation =
instruction->AddInstruction(annotation->CloneWithNewOperands(
instruction->shape(), {instruction}));
}
UpdateInstructionLayout(InstructionAndIndex(new_annotation, -1),
shape_before_copy.layout());
VLOG(3) << absl::StreamFormat("Creating copy with shape %s",
shape_after_copy.ToString(true));
HloInstruction* new_copy =
instruction->AddInstruction(copy_to_move->CloneWithNewOperands(
shape_after_copy, {new_annotation}));
VLOG(2) << absl::StreamFormat("Inserting copy \"%s\" after \"%s\"",
new_copy->name(), instruction->name());
std::vector<HloInstruction*> users = instruction->users();
for (HloInstruction* use : users) {
if (use == new_copy || use == new_annotation) {
continue;
}
TF_RETURN_IF_ERROR(
instruction->ReplaceUseWithDifferentShape(use, new_copy));
}
if (new_annotation != annotation) {
TF_RETURN_IF_ERROR(annotation->ReplaceAllUsesWithDifferentShape(
annotation->mutable_operand(0)));
to_remove.insert(annotation);
}
continue;
}
if (instruction->opcode() == HloOpcode::kDynamicUpdateSlice) {
HloInstruction* annotation =
FindToHostAnnotationToUpdate(instruction->mutable_operand(1));
if (annotation == nullptr) {
return absl::InternalError("Annotation not found.");
}
CHECK(annotation->opcode() == HloOpcode::kCustomCall);
HloInstruction* new_annotation =
instruction->AddInstruction(annotation->CloneWithNewOperands(
instruction->operand(1)->shape(),
{instruction->mutable_operand(1)}));
TF_RETURN_IF_ERROR(instruction->ReplaceOperandWith(1, new_annotation));
TF_RETURN_IF_ERROR(
annotation->ReplaceAllUsesWith(annotation->mutable_operand(0)));
processed_annotations.insert(annotation);
processed_annotations.insert(new_annotation);
to_remove.insert(annotation);
if (instruction->shape().layout().minor_to_major() !=
instruction->operand(1)->shape().layout().minor_to_major()) {
HloInstruction* update_slice = instruction->mutable_operand(1);
CHECK(update_slice->IsCustomCall(
host_memory_offload_annotations::kMoveToHostCustomCallTarget));
*update_slice->mutable_shape()->mutable_layout() =
instruction->shape().layout();
HloInstruction* new_copy =
update_slice->AddInstruction(HloInstruction::CreateUnary(
update_slice->shape(), HloOpcode::kCopy,
update_slice->mutable_operand(0)));
TF_RETURN_IF_ERROR(update_slice->ReplaceOperandWith(0, new_copy));
}
}
stack.emplace_back(instruction_and_index, shape_before_copy,
shape_after_copy);
}
}
VLOG(2) << absl::StreamFormat("Removing copy \"%s\"",
copy_to_move->ToString());
TF_RETURN_IF_ERROR(copy_to_move->ReplaceAllUsesWithDifferentShape(
copy_to_move->mutable_operand(0)));
TF_RETURN_IF_ERROR(copy_to_move->parent()->RemoveInstruction(copy_to_move));
return absl::OkStatus();
}
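// For one starting annotation (or host-memory entry parameter), walks up to
// the origin of the offload chain, then back down collecting layout-changing
// copies, and moves them (last one first) past the host section via MoveCopy.
// Returns true if anything changed.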
absl::StatusOr<bool> ProcessAnnotationForCopyMovement(
HloInstruction* instruction, const CallGraph* call_graph,
absl::flat_hash_set<HloInstruction*>& processed_annotations,
absl::flat_hash_set<HloInstruction*>& to_remove) {
VLOG(2) << "Walking down graph starting at instruction "
<< instruction->name();
if (instruction->IsRoot()) {
return false;
}
if (instruction->user_count() == 0) {
return false;
}
HloInstruction* starting_instr =
FindDUSFromAnnotation(instruction->users().at(0));
if (starting_instr->opcode() != HloOpcode::kDynamicUpdateSlice) {
starting_instr = instruction;
}
if (!(starting_instr->IsCustomCall(
host_memory_offload_annotations::kMoveToHostCustomCallTarget) ||
IsEntryComputationParameter(starting_instr) ||
starting_instr->opcode() == HloOpcode::kDynamicUpdateSlice)) {
return absl::InternalError(
"Starting instruction must be a move-to-host annotation, entry "
"computation parameter, or dynamic-update-slice.");
}
VLOG(2) << "Effective starting instruction: " << starting_instr->name();
InstructionAndIndex current_value(starting_instr, -1);
processed_annotations.insert(current_value.instruction);
if (current_value.instruction->opcode() == HloOpcode::kDynamicUpdateSlice) {
while (true) {
VLOG(10) << "Current value before: "
<< current_value.instruction->ToString();
absl::StatusOr<InstructionAndIndex> current_value_up =
WalkUpMemoryOffload(current_value, *call_graph);
if (!current_value_up.ok()) {
return false;
}
if (current_value_up.value() == current_value) {
break;
}
current_value = current_value_up.value();
VLOG(10) << "Current value after: "
<< current_value.instruction->ToString();
HloInstruction* annotation = current_value.instruction;
if (annotation->opcode() == HloOpcode::kDynamicUpdateSlice) {
HloInstruction* real_annotation =
FindToHostAnnotationToUpdate(annotation->mutable_operand(1));
if (!real_annotation->IsCustomCall(
host_memory_offload_annotations::kMoveToHostCustomCallTarget)) {
return false;
}
}
}
}
std::vector<InstructionAndIndex> copies_to_move;
std::vector<InstructionAndIndex> stack = {current_value};
while (!stack.empty()) {
VLOG(5) << "Current value before down: "
<< stack.back().instruction->ToString() << " "
<< stack.back().index;
if (absl::c_linear_search(kUsersOpcodes,
stack.back().instruction->opcode()) ||
stack.back().instruction->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) {
HloInstruction* annotation =
FindToDeviceAnnotationToUpdate(stack.back().instruction);
if (!annotation ||
!annotation->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) {
VLOG(5) << "Couldn't find annotation for consumer instruction in chain";
return false;
}
if (annotation->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) {
for (HloInstruction* user : annotation->users()) {
HloInstruction* root_instruction =
annotation->parent()->root_instruction();
if (root_instruction == user &&
root_instruction->opcode() == HloOpcode::kTuple &&
!root_instruction->parent()->IsEntryComputation()) {
std::vector<HloInstruction*> callers =
call_graph->GetComputationCallers(annotation->parent());
if (callers.size() != 1 ||
callers[0]->opcode() != HloOpcode::kWhile) {
return absl::InvalidArgumentError(absl::StrFormat(
"Expected computation \"%s\" to be called only by one caller "
"and that caller to be a While. There are %d caller(s): [%s]",
current_value.instruction->parent()->name(), callers.size(),
absl::StrJoin(
callers, ", ",
[](std::string* out, const HloInstruction* instr) {
absl::StrAppend(out, instr->name());
})));
}
for (int i = 0; i < user->operands().size(); i++) {
if (user->operands()[i] == annotation &&
annotation->operand(0)->opcode() ==
HloOpcode::kGetTupleElement &&
annotation->operand(0)->operand(0)->opcode() ==
HloOpcode::kParameter &&
annotation->operand(0)->tuple_index() == i) {
user->ReplaceOperandWith(i, annotation->mutable_operand(0))
.IgnoreError();
}
}
}
}
}
stack.pop_back();
continue;
}
absl::StatusOr<std::vector<InstructionAndIndex>> current_value_down =
WalkDownMemoryOffload(stack.back(), *call_graph,
                              /*for_move_copy_phase=*/false);
if (!current_value_down.ok()) {
VLOG(5) << "Current value down failed: " << current_value_down.status();
break;
}
stack.pop_back();
stack.insert(stack.end(), current_value_down.value().begin(),
current_value_down.value().end());
for (InstructionAndIndex& instruction_and_index :
current_value_down.value()) {
VLOG(5) << "Current value last down: "
<< stack.back().instruction->ToString();
if (instruction_and_index.instruction->opcode() == HloOpcode::kCopy) {
VLOG(1) << absl::StreamFormat(
" Found a copy to move: \"%s\"",
instruction_and_index.instruction->name());
copies_to_move.push_back(instruction_and_index);
}
}
}
if (copies_to_move.empty()) {
return false;
}
for (auto it = copies_to_move.rbegin(); it != copies_to_move.rend(); ++it) {
TF_RETURN_IF_ERROR(
MoveCopy(*it, call_graph, processed_annotations, to_remove));
}
return true;
}
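// Runs copy movement once per starting instruction, sharing the set of
// already-processed annotations across calls, and finally deletes annotations
// made obsolete by the rewrites.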
absl::StatusOr<bool> FixupInterveningCopies(
const std::vector<HloInstruction*>& starting_instructions,
const CallGraph* call_graph) {
absl::flat_hash_set<HloInstruction*> processed_annotations;
absl::flat_hash_set<HloInstruction*> annotations_to_remove;
bool changed = false;
for (HloInstruction* instruction : starting_instructions) {
if (processed_annotations.contains(instruction)) {
continue;
}
TF_ASSIGN_OR_RETURN(bool changed_annotation_for_copy_movement,
ProcessAnnotationForCopyMovement(
instruction, call_graph, processed_annotations,
annotations_to_remove));
changed |= changed_annotation_for_copy_movement;
}
for (HloInstruction* instruction : annotations_to_remove) {
TF_RETURN_IF_ERROR(instruction->parent()->RemoveInstruction(instruction));
}
return changed;
}
}
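// Starting points are entry parameters whose layout places them in host
// memory (kHostMemorySpaceColor) plus all MoveToHost custom-calls.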
std::vector<HloInstruction*>
HostOffloadLegalize::FindStartingInstructionsOfHostMemoryOffload(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) const {
std::vector<HloInstruction*> starting_instructions;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (IsEntryComputationParameter(instruction)) {
Shape param_shape =
module->entry_computation_layout()
.parameter_layout(instruction->parameter_number())
.shape();
if (param_shape.has_layout() &&
param_shape.layout().memory_space() == kHostMemorySpaceColor) {
starting_instructions.push_back(instruction);
continue;
}
}
if (instruction->IsCustomCall(
host_memory_offload_annotations::kMoveToHostCustomCallTarget)) {
starting_instructions.push_back(instruction);
}
}
}
return starting_instructions;
}
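// Broadcast duplication always runs; copy movement only makes sense once
// layouts have been assigned, so it is skipped when !after_layout_.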
absl::StatusOr<bool> HostOffloadLegalize::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
TF_ASSIGN_OR_RETURN(bool duplicated_at_least_one_broadcast,
DuplicateBroadcastForEachUse(module));
if (duplicated_at_least_one_broadcast) {
changed = true;
}
if (!after_layout_) {
return changed;
}
std::vector<HloInstruction*> starting_instructions =
FindStartingInstructionsOfHostMemoryOffload(module, execution_threads);
VLOG(1) << absl::StreamFormat(
"Starting instructions for host memory offload: [%s]",
absl::StrJoin(starting_instructions, ", ",
[](std::string* out, HloInstruction* instruction) {
return absl::StrAppend(out, instruction->name());
}));
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
TF_ASSIGN_OR_RETURN(
bool changed_intervening_copies,
FixupInterveningCopies(starting_instructions, call_graph.get()));
changed |= changed_intervening_copies;
return changed;
}
} | #include "xla/service/host_offload_legalize.h"
#include <cstdint>
#include <stack>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class HostOffloadLegalizeTest : public HloTestBase {
protected:
static constexpr int64_t kHostMemorySpaceColor{5};
absl::StatusOr<bool> RunHostOffloadLegalize(HloModule* module) {
TF_EXPECT_OK(verifier().Run(module).status());
if (module->has_schedule()) {
return absl::InternalError("Expected a non-scheduled module");
}
HostOffloadLegalize host_offload_legalize(kHostMemorySpaceColor,
                                              /*after_layout=*/true);
return host_offload_legalize.Run(module);
}
void TestShapeHasMemorySpace(const Shape& shape, int64_t memory_space) {
ASSERT_TRUE(shape.has_layout());
EXPECT_EQ(shape.layout().memory_space(), memory_space);
}
bool HaveRemainingOffloadAnnotations(const HloModule* module) {
for (const HloComputation* computation : module->computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
if (instruction->IsCustomCall(
{host_memory_offload_annotations::kMoveToHostCustomCallTarget,
host_memory_offload_annotations::
kMoveToDeviceCustomCallTarget})) {
return true;
}
}
}
return false;
}
};
TEST_F(HostOffloadLegalizeTest, TestWithAsyncCall) {
const std::string& hlo_string = R"(
HloModule jit_update, entry_computation_layout={(f32[20,3,256,133]{2,3,1,0:T(8,128)S(5)})->(f32[20,3,256,133]{2,1,0,3:T(4,128)}, f32[4096]{0:T(1024)})}
%async_computation {
%param_0 = f32[20,3,256,133] parameter(0)
ROOT %offloaded-custom-call = f32[4096] custom-call(%param_0), custom_call_target="HostExecute"
}, execution_thread="host"
ENTRY main {
%param.246 = f32[20,3,256,133] parameter(0)
%async-start = ((f32[20,3,256,133]), f32[4096], u32[]) async-start(%param.246), async_execution_thread="host", calls=%async_computation
%async-done = f32[4096] custom-call-done(%async-start)
copy.16744 = f32[20,3,256,133]{2,1,0,3:T(4,128)} copy(param.246)
custom-call.7832 = f32[20,3,256,133]{2,1,0,3:T(4,128)} custom-call(copy.16744), custom_call_target="MoveToDevice"
ROOT tuple.16745 = (f32[20,3,256,133]{2,1,0,3:T(4,128)}, f32[4096]{0:T(1024)}) tuple(custom-call.7832, %async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloadLegalize(module.get()));
EXPECT_TRUE(changed);
HloInstruction* custom_call =
FindInstruction(module.get(), "custom-call.7832");
ASSERT_NE(custom_call, nullptr);
EXPECT_EQ(custom_call->users()[0]->opcode(), HloOpcode::kCopy);
XLA_VLOG_LINES(1, module->ToString());
}
TEST_F(HostOffloadLegalizeTest, NoCopyWithOptBarrierMoreElaborate) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16,256]{0,1})->f32[16,256]{1,0}}
ENTRY main.24 {
Arg_0.1 = f32[16,256]{0,1} parameter(0)
cosine.4 = f32[16,256]{0,1} cosine(Arg_0.1)
custom-call.5 = f32[16,256]{0,1} custom-call(cosine.4), custom_call_target="MoveToHost"
sine.3 = f32[16,256]{0,1} sine(Arg_0.1)
cosine.7 = f32[16,256]{0,1} cosine(sine.3)
custom-call.8 = f32[16,256]{0,1} custom-call(cosine.7), custom_call_target="MoveToHost"
sine.6 = f32[16,256]{0,1} sine(sine.3)
cosine.9 = f32[16,256]{0,1} cosine(sine.6)
custom-call.10 = f32[16,256]{0,1} custom-call(cosine.9), custom_call_target="MoveToHost"
constant.2 = f32[] constant(1)
cp = f32[16,256]{1,0} copy(custom-call.8)
tuple.11 = (f32[16,256]{0,1}, f32[16,256]{1,0}, f32[16,256]{0,1}, f32[]) tuple(custom-call.5, cp, custom-call.10, constant.2)
opt-barrier.12 = (f32[16,256]{0,1}, f32[16,256]{1,0}, f32[16,256]{0,1}, f32[]) opt-barrier(tuple.11)
get-tuple-element.16 = f32[] get-tuple-element(opt-barrier.12), index=3
broadcast.20 = f32[16,256]{0,1} broadcast(get-tuple-element.16), dimensions={}
get-tuple-element.15 = f32[16,256]{0,1} get-tuple-element(opt-barrier.12), index=2
custom-call.19 = f32[16,256]{0,1} custom-call(get-tuple-element.15), custom_call_target="MoveToDevice"
multiply.21 = f32[16,256]{0,1} multiply(broadcast.20, custom-call.19)
cp2 = f32[16,256]{1,0} copy(multiply.21)
get-tuple-element.14 = f32[16,256]{1,0} get-tuple-element(opt-barrier.12), index=1
custom-call.18 = f32[16,256]{1,0} custom-call(get-tuple-element.14), custom_call_target="MoveToDevice"
multiply.22 = f32[16,256]{1,0} multiply(cp2, custom-call.18)
get-tuple-element.13 = f32[16,256]{0,1} get-tuple-element(opt-barrier.12), index=0
custom-call.17 = f32[16,256]{0,1} custom-call(get-tuple-element.13), custom_call_target="MoveToDevice"
cp3 = f32[16,256]{1,0} copy(custom-call.17)
ROOT multiply.23 = f32[16,256]{1,0} multiply(multiply.22, cp3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloadLegalize(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
HloInstruction* custom_call = FindInstruction(module.get(), "custom-call.18");
ASSERT_NE(custom_call, nullptr);
EXPECT_EQ(custom_call->users()[0]->opcode(), HloOpcode::kCopy);
EXPECT_EQ(custom_call->shape().layout(), LayoutUtil::MakeLayout({0, 1}));
EXPECT_EQ(custom_call->users()[0]->shape().layout(),
LayoutUtil::MakeLayout({1, 0}));
}
TEST_F(HostOffloadLegalizeTest, XposeCopyOnParameterStreaming) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[16,256]{0,1},f32[16,256]{0,1:T(8,128)S(5)})->f32[16,256]{1,0}}
ENTRY main.24 {
Arg_0.1 = f32[16,256]{0,1} parameter(0)
Arg_0.2 = f32[16,256]{0,1:T(8,128)} parameter(1)
cp0 = f32[16,256]{1,0} copy(Arg_0.2)
cosine.4 = f32[16,256]{0,1} cosine(Arg_0.1)
custom-call.5 = f32[16,256]{0,1} custom-call(cosine.4), custom_call_target="MoveToHost"
sine.3 = f32[16,256]{0,1} sine(Arg_0.1)
cosine.7 = f32[16,256]{0,1} cosine(sine.3)
custom-call.8 = f32[16,256]{0,1} custom-call(cosine.7), custom_call_target="MoveToHost"
constant.2 = f32[] constant(1)
cp1 = f32[16,256]{1,0} copy(custom-call.8)
tuple.11 = (f32[16,256]{0,1}, f32[16,256]{1,0}, f32[16,256]{1,0}, f32[]) tuple(custom-call.5, cp1, cp0, constant.2)
opt-barrier.12 = (f32[16,256]{0,1}, f32[16,256]{1,0}, f32[16,256]{1,0}, f32[]) opt-barrier(tuple.11)
get-tuple-element.16 = f32[] get-tuple-element(opt-barrier.12), index=3
broadcast.20 = f32[16,256]{0,1} broadcast(get-tuple-element.16), dimensions={}
get-tuple-element.15 = f32[16,256]{1,0} get-tuple-element(opt-barrier.12), index=2
custom-call.19 = f32[16,256]{1,0} custom-call(get-tuple-element.15), custom_call_target="MoveToDevice"
multiply.21 = f32[16,256]{0,1} multiply(broadcast.20, custom-call.19)
cp2 = f32[16,256]{1,0} copy(multiply.21)
get-tuple-element.14 = f32[16,256]{1,0} get-tuple-element(opt-barrier.12), index=1
custom-call.18 = f32[16,256]{1,0} custom-call(get-tuple-element.14), custom_call_target="MoveToDevice"
multiply.22 = f32[16,256]{1,0} multiply(cp2, custom-call.18)
get-tuple-element.13 = f32[16,256]{0,1} get-tuple-element(opt-barrier.12), index=0
custom-call.17 = f32[16,256]{0,1} custom-call(get-tuple-element.13), custom_call_target="MoveToDevice"
cp3 = f32[16,256]{1,0} copy(custom-call.17)
ROOT multiply.23 = f32[16,256]{1,0} multiply(multiply.22, cp3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloadLegalize(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
HloInstruction* custom_call = FindInstruction(module.get(), "custom-call.18");
ASSERT_NE(custom_call, nullptr);
EXPECT_EQ(custom_call->users()[0]->opcode(), HloOpcode::kCopy);
EXPECT_EQ(custom_call->shape().layout(), LayoutUtil::MakeLayout({0, 1}));
EXPECT_EQ(custom_call->users()[0]->shape().layout(),
LayoutUtil::MakeLayout({1, 0}));
custom_call = FindInstruction(module.get(), "custom-call.19");
ASSERT_NE(custom_call, nullptr);
EXPECT_EQ(custom_call->users()[0]->opcode(), HloOpcode::kCopy);
EXPECT_EQ(custom_call->shape().layout(),
LayoutUtil::MakeLayout({0, 1}, {}, {}, {}, {Tile{{8, 128}}}));
EXPECT_EQ(custom_call->users()[0]->shape().layout(),
LayoutUtil::MakeLayout({1, 0}));
}
TEST_F(HostOffloadLegalizeTest, DUSSameLayoutForOperandAndUpdate_1) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(bf16[16,512,532]{1,2,0})->bf16[1,16,512,532]{2,3,1,0}}
ENTRY main.24 {
constant_0 = s32[] constant(0)
cs0 = bf16[] constant(0)
broadcast = bf16[20,16,512,532]{3,2,1,0} broadcast(cs0), dimensions={}
cp = bf16[20,16,512,532]{3,2,1,0} copy(broadcast)
custom-call.8 = bf16[20,16,512,532]{3,2,1,0} custom-call(cp), custom_call_target="MoveToHost"
copy = bf16[20,16,512,532]{2,3,1,0} copy(custom-call.8)
arg1 = bf16[16,512,532]{1,2,0} parameter(0)
copy.17302 = bf16[16,512,532]{2,1,0} copy(arg1)
bitcast.6100 = bf16[1,16,512,532]{3,2,1,0} bitcast(copy.17302)
copy.20241 = bf16[1,16,512,532]{2,3,1,0} copy(bitcast.6100)
custom-call.6720 = bf16[1,16,512,532]{2,3,1,0} custom-call(copy.20241), custom_call_target="MoveToHost"
dynamic-update-slice.6830 = bf16[20,16,512,532]{2,3,1,0} dynamic-update-slice(copy, custom-call.6720, constant_0, constant_0, constant_0, constant_0)
dynamic_slice_0 = bf16[1,16,512,532]{2,3,1,0} dynamic-slice(dynamic-update-slice.6830, constant_0, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,16,512,532}
ROOT custom_call_0.1 = bf16[1,16,512,532]{2,3,1,0} custom-call(dynamic_slice_0), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloadLegalize(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
HloInstruction* dus =
FindInstruction(module.get(), "dynamic-update-slice.6830");
ASSERT_NE(dus, nullptr);
EXPECT_EQ(dus->operand(0)->shape().layout(),
dus->operand(1)->shape().layout());
EXPECT_EQ(dus->shape().layout(), dus->operand(1)->shape().layout());
const HloInstruction* custom_call =
module->entry_computation()->root_instruction()->operand(0);
EXPECT_TRUE(custom_call->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget));
EXPECT_EQ(custom_call->users()[0]->opcode(), HloOpcode::kCopy);
EXPECT_EQ(custom_call->shape().layout(),
LayoutUtil::MakeLayout({3, 2, 1, 0}));
EXPECT_EQ(custom_call->users()[0]->shape().layout(),
LayoutUtil::MakeLayout({2, 3, 1, 0}));
}
TEST_F(HostOffloadLegalizeTest, DUSSameLayoutForOperandAndUpdate_2) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(bf16[16,512,532]{1,2,0})->bf16[1,16,512,532]{2,3,1,0}}
ENTRY main.24 {
constant_0 = s32[] constant(0)
cs0 = bf16[] constant(0)
broadcast = bf16[20,16,512,532]{3,2,1,0} broadcast(cs0), dimensions={}
cp = bf16[20,16,512,532]{3,2,1,0} copy(broadcast)
custom-call.8 = bf16[20,16,512,532]{3,2,1,0} custom-call(cp), custom_call_target="MoveToHost"
copy = bf16[20,16,512,532]{2,3,1,0} copy(custom-call.8)
arg1 = bf16[16,512,532]{1,2,0} parameter(0)
copy.17302 = bf16[16,512,532]{2,1,0} copy(arg1)
custom-call.6720 = bf16[16,512,532]{2,1,0} custom-call(copy.17302), custom_call_target="MoveToHost"
bitcast.6100 = bf16[1,16,512,532]{3,2,1,0} bitcast(custom-call.6720)
copy.20241 = bf16[1,16,512,532]{2,3,1,0} copy(bitcast.6100)
dynamic-update-slice.6830 = bf16[20,16,512,532]{2,3,1,0} dynamic-update-slice(copy, copy.20241, constant_0, constant_0, constant_0, constant_0)
dynamic_slice_0 = bf16[1,16,512,532]{2,3,1,0} dynamic-slice(dynamic-update-slice.6830, constant_0, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,16,512,532}
ROOT custom_call_0.1 = bf16[1,16,512,532]{2,3,1,0} custom-call(dynamic_slice_0), custom_call_target="MoveToDevice"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloadLegalize(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
HloInstruction* dus =
FindInstruction(module.get(), "dynamic-update-slice.6830");
ASSERT_NE(dus, nullptr);
EXPECT_EQ(dus->operand(0)->shape().layout(),
dus->operand(1)->shape().layout());
EXPECT_EQ(dus->shape().layout(), dus->operand(1)->shape().layout());
const HloInstruction* custom_call =
module->entry_computation()->root_instruction()->operand(0);
EXPECT_TRUE(custom_call->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget));
EXPECT_EQ(custom_call->users()[0]->opcode(), HloOpcode::kCopy);
EXPECT_EQ(custom_call->shape().layout(),
LayoutUtil::MakeLayout({3, 2, 1, 0}));
EXPECT_EQ(custom_call->users()[0]->shape().layout(),
LayoutUtil::MakeLayout({2, 3, 1, 0}));
}
TEST_F(HostOffloadLegalizeTest, LlmActivationHostMemoryMultipleConsumers) {
const std::string& hlo_string = R"(
HloModule llm_while
producing_while_condition {
producing_condition_param = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) parameter(0)
producing_condition_current_iteration_index = s32[] get-tuple-element(producing_condition_param), index=0
producing_condition_iteration_count = s32[] constant(96)
ROOT producing_condition_result = pred[] compare(producing_condition_current_iteration_index, producing_condition_iteration_count), direction=LT
}
consuming_while_condition {
consuming_condition_param = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) parameter(0)
consuming_condition_current_iteration_index = s32[] get-tuple-element(consuming_condition_param), index=0
consuming_condition_iteration_count = s32[] constant(96)
ROOT consuming_condition_result = pred[] compare(consuming_condition_current_iteration_index, consuming_condition_iteration_count), direction=LT
}
producing_while_body {
input_tuple.0 = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
data_0.0 = f32[96,8,6,2048,2048]{0,1,2,3,4} get-tuple-element(input_tuple.0), index=1
constant_0.0 = s32[] constant(0)
constant_1.0 = s32[] constant(1)
constant_96 = s32[] constant(96)
slice_data_0 = f32[1,8,6,2048,2048] constant({...})
compare_result.0 = pred[] compare(current_iteration_index.0, constant_0.0), direction=LT
add_result = s32[] add(current_iteration_index.0, constant_96)
select_result.0 = s32[] select(compare_result.0, add_result, current_iteration_index.0)
custom_call_0.0 = f32[1,8,6,2048,2048] custom-call(slice_data_0), custom_call_target="MoveToHost"
dynamic_update_slice_0 = f32[96,8,6,2048,2048]{0,1,2,3,4} dynamic-update-slice(data_0.0, custom_call_0.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1.0)
ROOT tuple_result.0 = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) tuple(incremented_index.0, dynamic_update_slice_0)
}
consuming_while_body {
input_tuple.1 = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) parameter(0)
current_iteration_index.1 = s32[] get-tuple-element(input_tuple.1), index=0
data_0.1 = f32[96,8,6,2048,2048]{0,1,3,2,4} get-tuple-element(input_tuple.1), index=1
constant_0.1 = s32[] constant(0)
constant_1.1 = s32[] constant(1)
constant_95 = s32[] constant(95)
constant_191 = s32[] constant(191)
subtract_0 = s32[] subtract(constant_95, current_iteration_index.1)
compare_result.1 = pred[] compare(subtract_0, constant_0.1), direction=LT
subtract_1 = s32[] subtract(constant_191, current_iteration_index.1)
select_result.1 = s32[] select(compare_result.1, subtract_1, subtract_0)
dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(data_0.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,2048}
custom_call_0.1 = f32[1,8,6,2048,2048] custom-call(dynamic_slice_0), custom_call_target="MoveToDevice"
tanh_0 = f32[1,8,6,2048,2048] tanh(custom_call_0.1)
incremented_index.1 = s32[] add(current_iteration_index.1, constant_1.1)
ROOT tuple_result.1 = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) tuple(incremented_index.1, data_0.1)
}
ENTRY main {
entry_param_0 = f32[] parameter(0)
entry_param_1 = s32[] parameter(1)
entry_param_2 = s32[] parameter(2)
cs0 = f32[] constant(0)
broadcast_0 = f32[96,8,6,2048,2048]{0,1,2,3,4} broadcast(cs0), dimensions={}
constant_s32_0 = s32[] constant(0)
tuple_for_producing_while = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) tuple(constant_s32_0, broadcast_0)
producing_while = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) while(tuple_for_producing_while), condition=producing_while_condition, body=producing_while_body
while_output_1 = f32[96,8,6,2048,2048]{0,1,2,3,4} get-tuple-element(producing_while), index=1
cp = f32[96,8,6,2048,2048]{0,1,3,2,4} copy(while_output_1)
tuple_for_consuming_while = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) tuple(constant_s32_0, cp)
consuming_while = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) while(tuple_for_consuming_while), condition=consuming_while_condition, body=consuming_while_body
second_while_output = f32[96,8,6,2048,2048]{0,1,3,2,4} get-tuple-element(consuming_while), index=1
final_dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(second_while_output, entry_param_1, constant_s32_0, constant_s32_0, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,8,6,2048,2048}
final_host_to_device_custom_call_0 = f32[1,8,6,2048,2048] custom-call(final_dynamic_slice_0), custom_call_target="MoveToDevice"
final_slice_0 = f32[1,8,6,2048,2048] slice(second_while_output), slice={[41:42], [0:8], [0:6], [0:2048], [0:2048]}
final_host_to_device_custom_call_1 = f32[1,8,6,2048,2048] custom-call(final_slice_0), custom_call_target="MoveToDevice"
ROOT add = f32[1,8,6,2048,2048] add(final_host_to_device_custom_call_0, final_host_to_device_custom_call_1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloadLegalize(module.get()));
EXPECT_TRUE(changed);
HloInstruction* copy = FindInstruction(module.get(), HloOpcode::kCopy);
HloInstruction* consuming_while =
FindInstruction(module.get(), "consuming_while");
ASSERT_NE(copy, nullptr);
ASSERT_NE(consuming_while, nullptr);
EXPECT_EQ(copy->parent(), consuming_while->while_body());
XLA_VLOG_LINES(1, module->ToString());
}
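// Like the test above, but with two chained copies between the loops; the
// first copy should remain in the entry computation and still feed the second.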
TEST_F(HostOffloadLegalizeTest, LlmActivationHostMemoryMultipleCopies) {
const std::string& hlo_string = R"(
HloModule llm_while
producing_while_condition {
producing_condition_param = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) parameter(0)
producing_condition_current_iteration_index = s32[] get-tuple-element(producing_condition_param), index=0
producing_condition_iteration_count = s32[] constant(96)
ROOT producing_condition_result = pred[] compare(producing_condition_current_iteration_index, producing_condition_iteration_count), direction=LT
}
consuming_while_condition {
consuming_condition_param = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) parameter(0)
consuming_condition_current_iteration_index = s32[] get-tuple-element(consuming_condition_param), index=0
consuming_condition_iteration_count = s32[] constant(96)
ROOT consuming_condition_result = pred[] compare(consuming_condition_current_iteration_index, consuming_condition_iteration_count), direction=LT
}
producing_while_body {
input_tuple.0 = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) parameter(0)
current_iteration_index.0 = s32[] get-tuple-element(input_tuple.0), index=0
data_0.0 = f32[96,8,6,2048,2048]{0,1,2,3,4} get-tuple-element(input_tuple.0), index=1
constant_0.0 = s32[] constant(0)
constant_1.0 = s32[] constant(1)
constant_96 = s32[] constant(96)
slice_data_0 = f32[1,8,6,2048,2048] constant({...})
compare_result.0 = pred[] compare(current_iteration_index.0, constant_0.0), direction=LT
add_result = s32[] add(current_iteration_index.0, constant_96)
select_result.0 = s32[] select(compare_result.0, add_result, current_iteration_index.0)
custom_call_0.0 = f32[1,8,6,2048,2048] custom-call(slice_data_0), custom_call_target="MoveToHost"
dynamic_update_slice_0 = f32[96,8,6,2048,2048]{0,1,2,3,4} dynamic-update-slice(data_0.0, custom_call_0.0, select_result.0, constant_0.0, constant_0.0, constant_0.0, constant_0.0)
incremented_index.0 = s32[] add(current_iteration_index.0, constant_1.0)
ROOT tuple_result.0 = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) tuple(incremented_index.0, dynamic_update_slice_0)
}
consuming_while_body {
input_tuple.1 = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) parameter(0)
current_iteration_index.1 = s32[] get-tuple-element(input_tuple.1), index=0
data_0.1 = f32[96,8,6,2048,2048]{0,1,3,2,4} get-tuple-element(input_tuple.1), index=1
constant_0.1 = s32[] constant(0)
constant_1.1 = s32[] constant(1)
constant_95 = s32[] constant(95)
constant_191 = s32[] constant(191)
subtract_0 = s32[] subtract(constant_95, current_iteration_index.1)
compare_result.1 = pred[] compare(subtract_0, constant_0.1), direction=LT
subtract_1 = s32[] subtract(constant_191, current_iteration_index.1)
select_result.1 = s32[] select(compare_result.1, subtract_1, subtract_0)
dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(data_0.1, select_result.1, constant_0.1, constant_0.1, constant_0.1, constant_0.1), dynamic_slice_sizes={1,8,6,2048,2048}
custom_call_0.1 = f32[1,8,6,2048,2048] custom-call(dynamic_slice_0), custom_call_target="MoveToDevice"
tanh_0 = f32[1,8,6,2048,2048] tanh(custom_call_0.1)
incremented_index.1 = s32[] add(current_iteration_index.1, constant_1.1)
ROOT tuple_result.1 = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) tuple(incremented_index.1, data_0.1)
}
ENTRY main {
entry_param_0 = f32[] parameter(0)
entry_param_1 = s32[] parameter(1)
entry_param_2 = s32[] parameter(2)
cs0 = f32[] constant(0)
broadcast_0 = f32[96,8,6,2048,2048]{0,1,2,3,4} broadcast(cs0), dimensions={}
constant_s32_0 = s32[] constant(0)
tuple_for_producing_while = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) tuple(constant_s32_0, broadcast_0)
producing_while = (s32[], f32[96,8,6,2048,2048]{0,1,2,3,4}) while(tuple_for_producing_while), condition=producing_while_condition, body=producing_while_body
while_output_1 = f32[96,8,6,2048,2048]{0,1,2,3,4} get-tuple-element(producing_while), index=1
cp = f32[96,8,6,2048,2048]{0,1,3,2,4} copy(while_output_1)
cp1 = f32[96,8,6,2048,2048]{0,1,3,2,4} copy(cp)
tuple_for_consuming_while = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) tuple(constant_s32_0, cp1)
consuming_while = (s32[], f32[96,8,6,2048,2048]{0,1,3,2,4}) while(tuple_for_consuming_while), condition=consuming_while_condition, body=consuming_while_body
second_while_output = f32[96,8,6,2048,2048]{0,1,3,2,4} get-tuple-element(consuming_while), index=1
final_dynamic_slice_0 = f32[1,8,6,2048,2048] dynamic-slice(second_while_output, entry_param_1, constant_s32_0, constant_s32_0, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,8,6,2048,2048}
final_host_to_device_custom_call_0 = f32[1,8,6,2048,2048] custom-call(final_dynamic_slice_0), custom_call_target="MoveToDevice"
final_slice_0 = f32[1,8,6,2048,2048] slice(second_while_output), slice={[41:42], [0:8], [0:6], [0:2048], [0:2048]}
final_host_to_device_custom_call_1 = f32[1,8,6,2048,2048] custom-call(final_slice_0), custom_call_target="MoveToDevice"
ROOT add = f32[1,8,6,2048,2048] add(final_host_to_device_custom_call_0, final_host_to_device_custom_call_1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloadLegalize(module.get()));
EXPECT_TRUE(changed);
HloInstruction* copy_0 = FindInstruction(module.get(), "cp.2");
HloInstruction* copy_1 = FindInstruction(module.get(), "cp1.2");
HloInstruction* consuming_while =
FindInstruction(module.get(), "consuming_while");
ASSERT_NE(copy_0, nullptr);
ASSERT_NE(copy_1, nullptr);
ASSERT_NE(consuming_while, nullptr);
EXPECT_EQ(copy_0->parent(), module->entry_computation());
EXPECT_EQ(copy_1->operand(0), copy_0);
XLA_VLOG_LINES(1, module->ToString());
}
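// The layout-changing copy must be moved across the bitcast to the device
// side: the MoveToDevice custom call keeps the host layout, and the copy to
// the device tiling runs after it.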
TEST_F(HostOffloadLegalizeTest, MoveCopyOverBitcast) {
const std::string& hlo_string = R"(
HloModule jit_f, entry_computation_layout={(bf16[1,1,16384,4,256]{4,3,2,1,0:T(4,128)(2,1)S(5)})->bf16[1,16384,4,256]{3,1,2,0:T(8,128)(2,1)}}
ENTRY main {
param = bf16[1,1,16384,4,256]{4,3,2,1,0:T(4,128)(2,1)} parameter(0)
copy = bf16[1,1,16384,4,256]{4,2,3,1,0:T(8,128)(2,1)} copy(param)
bitcast = bf16[1,16384,4,256]{3,1,2,0:T(8,128)(2,1)} bitcast(copy)
custom-call = bf16[1,16384,4,256]{3,1,2,0:T(8,128)(2,1)} custom-call(bitcast), custom_call_target="MoveToDevice"
ROOT add = bf16[1,16384,4,256]{3,1,2,0:T(8,128)(2,1)} add(custom-call, custom-call)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHostOffloadLegalize(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
HloInstruction* custom_call = FindInstruction(module.get(), "custom-call");
EXPECT_EQ(custom_call->shape().layout(),
LayoutUtil::MakeLayout({3, 2, 1, 0}, {}, {}, {},
{Tile{{4, 128}}, Tile{{2, 1}}}));
EXPECT_EQ(custom_call->users()[0]->opcode(), HloOpcode::kCopy);
EXPECT_EQ(custom_call->users()[0]->shape().layout(),
LayoutUtil::MakeLayout({3, 1, 2, 0}, {}, {}, {},
{Tile{{8, 128}}, Tile{{2, 1}}}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_offload_legalize.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_offload_legalize_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9cbfb9af-6194-4234-b240-8a2e574acaef | cpp | google/quiche | quiche_ip_address | quiche/common/quiche_ip_address.cc | quiche/common/quiche_ip_address_test.cc | #include "quiche/common/quiche_ip_address.h"
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <string>
#include "absl/strings/str_cat.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/quiche_ip_address_family.h"
namespace quiche {
QuicheIpAddress QuicheIpAddress::Loopback4() {
QuicheIpAddress result;
result.family_ = IpAddressFamily::IP_V4;
result.address_.bytes[0] = 127;
result.address_.bytes[1] = 0;
result.address_.bytes[2] = 0;
result.address_.bytes[3] = 1;
return result;
}
QuicheIpAddress QuicheIpAddress::Loopback6() {
QuicheIpAddress result;
result.family_ = IpAddressFamily::IP_V6;
uint8_t* bytes = result.address_.bytes;
memset(bytes, 0, 15);
bytes[15] = 1;
return result;
}
QuicheIpAddress QuicheIpAddress::Any4() {
in_addr address;
memset(&address, 0, sizeof(address));
return QuicheIpAddress(address);
}
QuicheIpAddress QuicheIpAddress::Any6() {
in6_addr address;
memset(&address, 0, sizeof(address));
return QuicheIpAddress(address);
}
QuicheIpAddress::QuicheIpAddress() : family_(IpAddressFamily::IP_UNSPEC) {}
QuicheIpAddress::QuicheIpAddress(const in_addr& ipv4_address)
: family_(IpAddressFamily::IP_V4) {
address_.v4 = ipv4_address;
}
QuicheIpAddress::QuicheIpAddress(const in6_addr& ipv6_address)
: family_(IpAddressFamily::IP_V6) {
address_.v6 = ipv6_address;
}
bool operator==(QuicheIpAddress lhs, QuicheIpAddress rhs) {
if (lhs.family_ != rhs.family_) {
return false;
}
switch (lhs.family_) {
case IpAddressFamily::IP_V4:
return std::equal(lhs.address_.bytes,
lhs.address_.bytes + QuicheIpAddress::kIPv4AddressSize,
rhs.address_.bytes);
case IpAddressFamily::IP_V6:
return std::equal(lhs.address_.bytes,
lhs.address_.bytes + QuicheIpAddress::kIPv6AddressSize,
rhs.address_.bytes);
case IpAddressFamily::IP_UNSPEC:
return true;
}
QUICHE_BUG(quiche_bug_10126_2)
<< "Invalid IpAddressFamily " << static_cast<int32_t>(lhs.family_);
return false;
}
bool operator!=(QuicheIpAddress lhs, QuicheIpAddress rhs) {
return !(lhs == rhs);
}
bool QuicheIpAddress::IsInitialized() const {
return family_ != IpAddressFamily::IP_UNSPEC;
}
IpAddressFamily QuicheIpAddress::address_family() const { return family_; }
int QuicheIpAddress::AddressFamilyToInt() const {
return ToPlatformAddressFamily(family_);
}
std::string QuicheIpAddress::ToPackedString() const {
switch (family_) {
case IpAddressFamily::IP_V4:
return std::string(address_.chars, sizeof(address_.v4));
case IpAddressFamily::IP_V6:
return std::string(address_.chars, sizeof(address_.v6));
case IpAddressFamily::IP_UNSPEC:
return "";
}
QUICHE_BUG(quiche_bug_10126_3)
<< "Invalid IpAddressFamily " << static_cast<int32_t>(family_);
return "";
}
std::string QuicheIpAddress::ToString() const {
if (!IsInitialized()) {
return "";
}
char buffer[INET6_ADDRSTRLEN] = {0};
const char* result =
inet_ntop(AddressFamilyToInt(), address_.bytes, buffer, sizeof(buffer));
QUICHE_BUG_IF(quiche_bug_10126_4, result == nullptr)
<< "Failed to convert an IP address to string";
return buffer;
}
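// 12-byte prefix (::ffff:0:0/96) identifying an IPv4-mapped IPv6 address.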
static const uint8_t kMappedAddressPrefix[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff,
};
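// Converts an IPv4-mapped IPv6 address back to plain IPv4; anything else is
// returned unchanged.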
QuicheIpAddress QuicheIpAddress::Normalized() const {
if (!IsIPv6()) {
return *this;
}
if (!std::equal(std::begin(kMappedAddressPrefix),
std::end(kMappedAddressPrefix), address_.bytes)) {
return *this;
}
in_addr result;
memcpy(&result, &address_.bytes[12], sizeof(result));
return QuicheIpAddress(result);
}
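// Maps an IPv4 address into the ::ffff:0:0/96 IPv6 range; IPv6 addresses are
// returned unchanged.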
QuicheIpAddress QuicheIpAddress::DualStacked() const {
if (!IsIPv4()) {
return *this;
}
QuicheIpAddress result;
result.family_ = IpAddressFamily::IP_V6;
memcpy(result.address_.bytes, kMappedAddressPrefix,
sizeof(kMappedAddressPrefix));
memcpy(result.address_.bytes + 12, address_.bytes, kIPv4AddressSize);
return result;
}
bool QuicheIpAddress::FromPackedString(const char* data, size_t length) {
switch (length) {
case kIPv4AddressSize:
family_ = IpAddressFamily::IP_V4;
break;
case kIPv6AddressSize:
family_ = IpAddressFamily::IP_V6;
break;
default:
return false;
}
memcpy(address_.chars, data, length);
return true;
}
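// Parses a textual address, trying IPv6 before IPv4.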
bool QuicheIpAddress::FromString(std::string str) {
for (IpAddressFamily family :
{IpAddressFamily::IP_V6, IpAddressFamily::IP_V4}) {
int result =
inet_pton(ToPlatformAddressFamily(family), str.c_str(), address_.bytes);
if (result > 0) {
family_ = family;
return true;
}
}
return false;
}
bool QuicheIpAddress::IsIPv4() const {
return family_ == IpAddressFamily::IP_V4;
}
bool QuicheIpAddress::IsIPv6() const {
return family_ == IpAddressFamily::IP_V6;
}
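// True iff the first subnet_length bits match: whole bytes are compared with
// std::equal, and any leftover bits are compared under a high-bit mask.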
bool QuicheIpAddress::InSameSubnet(const QuicheIpAddress& other,
int subnet_length) {
if (!IsInitialized()) {
QUICHE_BUG(quiche_bug_10126_5)
<< "Attempting to do subnet matching on undefined address";
return false;
}
if ((IsIPv4() && subnet_length > 32) || (IsIPv6() && subnet_length > 128)) {
QUICHE_BUG(quiche_bug_10126_6) << "Subnet mask is out of bounds";
return false;
}
int bytes_to_check = subnet_length / 8;
int bits_to_check = subnet_length % 8;
const uint8_t* const lhs = address_.bytes;
const uint8_t* const rhs = other.address_.bytes;
if (!std::equal(lhs, lhs + bytes_to_check, rhs)) {
return false;
}
if (bits_to_check == 0) {
return true;
}
QUICHE_DCHECK_LT(static_cast<size_t>(bytes_to_check), sizeof(address_.bytes));
int mask = (~0u) << (8u - bits_to_check);
return (lhs[bytes_to_check] & mask) == (rhs[bytes_to_check] & mask);
}
in_addr QuicheIpAddress::GetIPv4() const {
QUICHE_DCHECK(IsIPv4());
return address_.v4;
}
in6_addr QuicheIpAddress::GetIPv6() const {
QUICHE_DCHECK(IsIPv6());
return address_.v6;
}
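// A prefix built from a bare address covers the whole address: /32 for IPv4,
// /128 for IPv6.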
QuicheIpPrefix::QuicheIpPrefix() : prefix_length_(0) {}
QuicheIpPrefix::QuicheIpPrefix(const QuicheIpAddress& address)
: address_(address) {
if (address_.IsIPv6()) {
prefix_length_ = QuicheIpAddress::kIPv6AddressSize * 8;
} else if (address_.IsIPv4()) {
prefix_length_ = QuicheIpAddress::kIPv4AddressSize * 8;
} else {
prefix_length_ = 0;
}
}
QuicheIpPrefix::QuicheIpPrefix(const QuicheIpAddress& address,
uint8_t prefix_length)
: address_(address), prefix_length_(prefix_length) {
QUICHE_DCHECK(prefix_length <= QuicheIpPrefix(address).prefix_length())
<< "prefix_length cannot be longer than the size of the IP address";
}
std::string QuicheIpPrefix::ToString() const {
return absl::StrCat(address_.ToString(), "/", prefix_length_);
}
bool operator==(const QuicheIpPrefix& lhs, const QuicheIpPrefix& rhs) {
return lhs.address_ == rhs.address_ &&
lhs.prefix_length_ == rhs.prefix_length_;
}
bool operator!=(const QuicheIpPrefix& lhs, const QuicheIpPrefix& rhs) {
return !(lhs == rhs);
}
} | #include "quiche/common/quiche_ip_address.h"
#include <cstdint>
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/quiche_ip_address_family.h"
namespace quiche {
namespace test {
namespace {
TEST(QuicheIpAddressTest, IPv4) {
QuicheIpAddress ip_address;
EXPECT_FALSE(ip_address.IsInitialized());
EXPECT_TRUE(ip_address.FromString("127.0.52.223"));
EXPECT_TRUE(ip_address.IsInitialized());
EXPECT_EQ(IpAddressFamily::IP_V4, ip_address.address_family());
EXPECT_TRUE(ip_address.IsIPv4());
EXPECT_FALSE(ip_address.IsIPv6());
EXPECT_EQ("127.0.52.223", ip_address.ToString());
const in_addr v4_address = ip_address.GetIPv4();
const uint8_t* const v4_address_ptr =
reinterpret_cast<const uint8_t*>(&v4_address);
EXPECT_EQ(127u, *(v4_address_ptr + 0));
EXPECT_EQ(0u, *(v4_address_ptr + 1));
EXPECT_EQ(52u, *(v4_address_ptr + 2));
EXPECT_EQ(223u, *(v4_address_ptr + 3));
}
TEST(QuicheIpAddressTest, IPv6) {
QuicheIpAddress ip_address;
EXPECT_FALSE(ip_address.IsInitialized());
EXPECT_TRUE(ip_address.FromString("fe80::1ff:fe23:4567"));
EXPECT_TRUE(ip_address.IsInitialized());
EXPECT_EQ(IpAddressFamily::IP_V6, ip_address.address_family());
EXPECT_FALSE(ip_address.IsIPv4());
EXPECT_TRUE(ip_address.IsIPv6());
EXPECT_EQ("fe80::1ff:fe23:4567", ip_address.ToString());
const in6_addr v6_address = ip_address.GetIPv6();
const uint16_t* const v6_address_ptr =
reinterpret_cast<const uint16_t*>(&v6_address);
EXPECT_EQ(0x80feu, *(v6_address_ptr + 0));
EXPECT_EQ(0x0000u, *(v6_address_ptr + 1));
EXPECT_EQ(0x0000u, *(v6_address_ptr + 2));
EXPECT_EQ(0x0000u, *(v6_address_ptr + 3));
EXPECT_EQ(0x0000u, *(v6_address_ptr + 4));
EXPECT_EQ(0xff01u, *(v6_address_ptr + 5));
EXPECT_EQ(0x23feu, *(v6_address_ptr + 6));
EXPECT_EQ(0x6745u, *(v6_address_ptr + 7));
EXPECT_EQ(ip_address, ip_address.Normalized());
EXPECT_EQ(ip_address, ip_address.DualStacked());
}
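// The packed strings are the raw 4-byte and 16-byte network-order encodings of
// the IPv4 and IPv6 loopback addresses.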
TEST(QuicheIpAddressTest, FromPackedString) {
QuicheIpAddress loopback4, loopback6;
const char loopback4_packed[] = "\x7f\0\0\x01";
const char loopback6_packed[] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x01";
EXPECT_TRUE(loopback4.FromPackedString(loopback4_packed, 4));
EXPECT_TRUE(loopback6.FromPackedString(loopback6_packed, 16));
EXPECT_EQ(loopback4, QuicheIpAddress::Loopback4());
EXPECT_EQ(loopback6, QuicheIpAddress::Loopback6());
}
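// ::ffff:7f00:1 is the IPv4-mapped form of 127.0.0.1, so DualStacked() and
// Normalized() should round-trip between the two.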
TEST(QuicheIpAddressTest, MappedAddress) {
QuicheIpAddress ipv4_address;
QuicheIpAddress mapped_address;
EXPECT_TRUE(ipv4_address.FromString("127.0.0.1"));
EXPECT_TRUE(mapped_address.FromString("::ffff:7f00:1"));
EXPECT_EQ(mapped_address, ipv4_address.DualStacked());
EXPECT_EQ(ipv4_address, mapped_address.Normalized());
}
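// Each case lists two addresses, a prefix length, and whether they should fall
// in the same subnet at that length.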
TEST(QuicheIpAddressTest, Subnets) {
struct {
const char* address1;
const char* address2;
int subnet_size;
bool same_subnet;
} test_cases[] = {
{"127.0.0.1", "127.0.0.2", 24, true},
{"8.8.8.8", "127.0.0.1", 24, false},
{"8.8.8.8", "127.0.0.1", 16, false},
{"8.8.8.8", "127.0.0.1", 8, false},
{"8.8.8.8", "127.0.0.1", 2, false},
{"8.8.8.8", "127.0.0.1", 1, true},
{"127.0.0.1", "127.0.0.128", 24, true},
{"127.0.0.1", "127.0.0.128", 25, false},
{"127.0.0.1", "127.0.0.127", 25, true},
{"127.0.0.1", "127.0.0.0", 30, true},
{"127.0.0.1", "127.0.0.1", 30, true},
{"127.0.0.1", "127.0.0.2", 30, true},
{"127.0.0.1", "127.0.0.3", 30, true},
{"127.0.0.1", "127.0.0.4", 30, false},
{"127.0.0.1", "127.0.0.2", 31, false},
{"127.0.0.1", "127.0.0.0", 31, true},
{"::1", "fe80::1", 8, false},
{"::1", "fe80::1", 1, false},
{"::1", "fe80::1", 0, true},
{"fe80::1", "fe80::2", 126, true},
{"fe80::1", "fe80::2", 127, false},
};
for (const auto& test_case : test_cases) {
QuicheIpAddress address1, address2;
ASSERT_TRUE(address1.FromString(test_case.address1));
ASSERT_TRUE(address2.FromString(test_case.address2));
EXPECT_EQ(test_case.same_subnet,
address1.InSameSubnet(address2, test_case.subnet_size))
<< "Addresses: " << test_case.address1 << ", " << test_case.address2
<< "; subnet: /" << test_case.subnet_size;
}
}
TEST(QuicheIpAddress, LoopbackAddresses) {
QuicheIpAddress loopback4;
QuicheIpAddress loopback6;
ASSERT_TRUE(loopback4.FromString("127.0.0.1"));
ASSERT_TRUE(loopback6.FromString("::1"));
EXPECT_EQ(loopback4, QuicheIpAddress::Loopback4());
EXPECT_EQ(loopback6, QuicheIpAddress::Loopback6());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_ip_address.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_ip_address_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |